| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
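Each row below pairs a code prompt (shown as the raw prefix) with the pandas call that completes it and the fully qualified API name. A minimal sketch for inspecting such a prompt/completion/api table, assuming the rows are exported to a parquet file (the file name is a placeholder, not the dataset's actual path):

```
import pandas as pd

# Placeholder path -- substitute the actual export of the prompt/completion/api table.
rows = pd.read_parquet("prompt_completion_api.parquet")

# Each record holds a code prefix, the expected completion, and the API it maps to.
print(rows.loc[0, "api"])         # e.g. "pandas.read_csv"
print(rows.loc[0, "completion"])  # e.g. "pd.read_csv(file_path)"
```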
import pandas as pd
from State import *
def trajectory2df(t_list, state_list, alpha_list):
x_list = []
vx_list = []
y_list = []
vy_list = []
theta_list = []
for state in state_list:
x_list.append(state.x)
vx_list.append(state.vx)
y_list.append(state.y)
vy_list.append(state.vy)
theta_list.append(state.theta)
return pd.DataFrame({'t' : t_list, 'x' : x_list,
'vx' : vx_list,'y' : y_list,
'vy' : vy_list, 'theta' : theta_list,
'alpha' : alpha_list})
def store_trajectory(t_list, state_list, alpha_list,file_path):
trajectory2df(t_list, state_list, alpha_list).to_csv(file_path)
def load_csv(file_path):
df = pd.read_csv(file_path)  # API: pandas.read_csv
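# Usage sketch (illustrative; `State` objects come from the project's State module, not shown here):
# store_trajectory(t_list, state_list, alpha_list, "trajectory.csv")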
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
# Choose GBDT Regression model as baseline
# my_model = GradientBoostingRegressor()
# Training Step
def my_train_func(station):
train_data = pd.read_csv('train-dataset/point_date_' + station + '.csv')
train_data_Y = train_data['actualPowerGeneration']
# Drop some irrelevant factors
drop_columns = ['longitude', 'latitude', 'RadiationHorizontalPlane', 'Temperature', 'actualPowerGeneration',
'Humidity', 'atmosphericPressure', 'windDirection', 'scatteredRadiation']
train_data_X = train_data.drop(axis=1, columns=drop_columns)
train_data_X['month'] = pd.to_datetime(train_data_X.Time).dt.month
train_data_X['day'] = pd.to_datetime(train_data_X.Time).dt.day
train_data_X['hour'] = pd.to_datetime(train_data_X.Time).dt.hour
train_data_X = train_data_X.drop(axis=1, columns=['Time'])
# Validation
X_train, X_test, Y_train, Y_test = train_test_split(train_data_X, train_data_Y, test_size=0.2, random_state=40)
myGBR = GradientBoostingRegressor(n_estimators=500,max_depth=7)
myGBR.fit(X_train, Y_train)
Y_pred = myGBR.predict(X_test)
# Output model to global variable
# my_model = myGBR
_ = joblib.dump(myGBR, 'model/' + station + '_model.pkl', compress=9)
print('Training completed. MSE on validation set is {}'.format(mean_squared_error(Y_test, Y_pred)))
print('Factors below are used: \n{}'.format(list(X_train.columns)))
def my_spredict_func(station, input_file, output_file):
# Clean test data
columns = 'Time,longitude,latitude,directRadiation,scatterdRadiation,windSpeed,airTransparency,airDensity'
columns = list(columns.split(','))
test_data = pd.read_csv('test-dataset/' + input_file, names=columns)
drop_columns = ['longitude', 'latitude', 'airTransparency', 'airDensity']
test_data = test_data.drop(axis=1, columns=drop_columns)
test_data['month'] = pd.to_datetime(test_data.Time).dt.month
test_data['day'] = pd.to_datetime(test_data.Time).dt.day
test_data['hour'] = pd.to_datetime(test_data.Time).dt.hour
test_data['min'] = pd.to_datetime(test_data.Time).dt.minute
# Find the time point we need to start with
test_data = test_data.sort_values(by='Time')
# Find the latest time point
time_point = test_data[(test_data['hour'] == 0) & (test_data['min'] == 0)].index.tolist()[0]
start_point = test_data.loc[time_point]['Time']
observation_period = pd.date_range(start=start_point, periods=96, freq='15T')  # API: pandas.date_range
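# Note: 96 periods at 15-minute frequency span exactly the next 24 hours of forecast time steps.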
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from utils import odds, clean_sheet, time_decay, score_mtx, get_next_gw
from ranked_probability_score import ranked_probability_score, match_outcome
import pymc3 as pm
import theano.tensor as tt
class Bayesian:
""" Model scored goals at home and away as Bayesian Random variables """
def __init__(self, games, performance='score', decay=True):
"""
Args:
games (pd.DataFrame): Finished games to use for training.
performance (string): Observed performance metric to use in model
decay (boolean): Apply time decay
"""
teams = np.sort(np.unique(games["team1"]))
league_size = len(teams)
self.teams = (
games.loc[:, ["team1"]]
.drop_duplicates()
.sort_values("team1")
.reset_index(drop=True)
.assign(team_index=np.arange(league_size))
.rename(columns={"team1": "team"})
)
self.league_size = self.teams.shape[0]
df = (
pd.merge(games, self.teams, left_on="team1", right_on="team")
.rename(columns={"team_index": "hg"})
.drop(["team"], axis=1)
.drop_duplicates()
.merge(self.teams, left_on="team2", right_on="team")
.rename(columns={"team_index": "ag"})
.drop(["team"], axis=1)
.sort_values("date")
)
df["date"] = pd.to_datetime(df["date"])
df["days_since"] = (df["date"].max() - df["date"]).dt.days
df["weight"] = time_decay(0.00003, df["days_since"]) if decay else 1
self.decay = decay
# Handle different data to infer
assert performance == 'score' or performance == 'xg'
self.performance = performance
self.games = df.loc[:, [
f"{performance}1", f"{performance}2", "team1", "team2",
"hg", "ag", "weight"]]
self.games = self.games.dropna()
if performance == 'xg':
self.games = (
self.games
.rename(columns={"xg1": "score1", "xg2": "score2"})
)
self.model = self._build_model()
def _build_model(self):
""" Build the model
Returns:
pymc3.Model: untrained model
"""
home_idx, teams = pd.factorize(self.games["team1"], sort=True)
away_idx, _ = pd.factorize(self.games["team2"], sort=True)
with pm.Model() as model:
# constant data
home_team = pm.Data("home_team", home_idx)
away_team = pm.Data("away_team", away_idx)
score1_obs = pm.Data("score1_obs", self.games["score1"])
score2_obs = pm.Data("score2_obs", self.games["score2"])
# global model parameters
home = pm.Normal("home", mu=0, sigma=1)
intercept = pm.Normal("intercept", mu=0, sigma=1)
sd_att = pm.HalfNormal("sd_att", sigma=2)
sd_def = pm.HalfNormal("sd_def", sigma=2)
# team-specific model parameters
atts_star = pm.Normal(
"atts_star",
mu=0,
sigma=sd_att,
shape=self.league_size)
defs_star = pm.Normal(
"defs_star",
mu=0,
sigma=sd_def,
shape=self.league_size)
# apply sum zero constraints
atts = pm.Deterministic(
"atts",
atts_star - tt.mean(atts_star))
defs = pm.Deterministic(
"defs",
defs_star - tt.mean(defs_star))
# calculate theta
home_theta = tt.exp(
intercept + atts[home_team] + defs[away_team] + home)
away_theta = tt.exp(
intercept + atts[away_team] + defs[home_team])
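# The scoring rates use a log-linear (Poisson) parameterisation:
#   home_theta = exp(intercept + atts[home_team] + defs[away_team] + home)
#   away_theta = exp(intercept + atts[away_team] + defs[home_team])
# where `home` is the home-advantage term; the exp link keeps both rates positive.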
# likelihood of observed data
pm.Potential(
'home_goals',
self.games["weight"].values * pm.Poisson.dist(mu=home_theta).logp(
score1_obs)
)
pm.Potential(
'away_goals',
self.games["weight"].values * pm.Poisson.dist(mu=away_theta).logp(
score2_obs)
)
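# pm.Potential adds the weighted Poisson log-likelihood straight to the joint density,
# so games with a smaller time-decay weight contribute less to the fitted parameters.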
return model
def fit(self):
"""Fit the model parameters"""
with self.model:
self.trace = pm.sample(
2000,
tune=1000,
cores=6,
return_inferencedata=False,
target_accept=0.85)
def predict(self, games):
"""Predict the outcome of games
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with game odds
"""
parameter_df = (
pd.DataFrame()
.assign(attack=[
np.mean([x[team] for x in self.trace["atts"]])
for team in range(self.league_size)])
.assign(defence=[
np.mean([x[team] for x in self.trace["defs"]])
for team in range(self.league_size)])
.assign(team=np.array(self.teams.team_index.values))
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='hg', right_on='team')
.rename(columns={"attack": "attack1", "defence": "defence1"})
.merge(parameter_df, left_on='ag', right_on='team')
.rename(columns={"attack": "attack2", "defence": "defence2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
.assign(home_adv=np.mean(self.trace["home"]))
.assign(intercept=np.mean([x for x in self.trace["intercept"]]))
)
fixtures_df["score1_infered"] = np.exp(
fixtures_df['intercept'] +
fixtures_df["home_adv"] +
fixtures_df["attack1"] +
fixtures_df["defence2"])
fixtures_df["score2_infered"] = np.exp(
fixtures_df['intercept'] +
fixtures_df["attack2"] +
fixtures_df["defence1"])
def synthesize_odds(row):
""" Lambda function that parses row by row to compute score matrix
Args:
row (array): Fixture
Returns:
(tuple): Home and Away winning and clean sheets odds
"""
m = score_mtx(row["score1_infered"], row["score2_infered"])
home_win_p, draw_p, away_win_p = odds(m)
home_cs_p, away_cs_p = clean_sheet(m)
return home_win_p, draw_p, away_win_p, home_cs_p, away_cs_p
(
fixtures_df["home_win_p"],
fixtures_df["draw_p"],
fixtures_df["away_win_p"],
fixtures_df["home_cs_p"],
fixtures_df["away_cs_p"]
) = zip(*fixtures_df.apply(
lambda row: synthesize_odds(row), axis=1))
return fixtures_df
def predict_posterior(self, games):
"""Predict the outcome of games using posterior sampling
Although I think this method is mathematically more sound,
it gives worse results
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with game odds
"""
with self.model:
pm.set_data(
{
"home_team": games.hg.values,
"away_team": games.ag.values,
"score1_obs": np.repeat(0, games.ag.values.shape[0]),
"score2_obs": np.repeat(0, games.ag.values.shape[0]),
}
)
post_pred = pm.sample_posterior_predictive(self.trace.posterior)
parameter_df = (
pd.DataFrame()
.assign(attack=[
np.mean([x[team] for x in self.trace.posterior["atts"]])
for team in range(self.league_size)])
.assign(defence=[
np.mean([x[team] for x in self.trace.posterior["defs"]])
for team in range(self.league_size)])
.assign(team=np.array(self.teams.team_index.values))
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='hg', right_on='team')
.rename(columns={"attack": "attack1", "defence": "defence1"})
.merge(parameter_df, left_on='ag', right_on='team')
.rename(columns={"attack": "attack2", "defence": "defence2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
.assign(home_adv=np.mean([x for x in self.trace.posterior["home"]]))
.assign(intercept=np.mean([x for x in self.trace.posterior["intercept"]]))
)
fixtures_df["score1_infered"] = post_pred["home_goals"].mean(axis=0)
fixtures_df["score2_infered"] = post_pred["away_goals"].mean(axis=0)
fixtures_df["home_win_p"] = (
(post_pred["home_goals"] > post_pred["away_goals"]).mean(axis=0)
)
fixtures_df["away_win_p"] = (
(post_pred["home_goals"] < post_pred["away_goals"]).mean(axis=0)
)
fixtures_df["draw_p"] = (
(post_pred["home_goals"] == post_pred["away_goals"]).mean(axis=0)
)
return fixtures_df
def evaluate(self, games):
""" Evaluate the model's prediction accuracy
Args:
games (pd.DataFrame): Fixtures to evaluate on
Returns:
pd.DataFrame: df with appended metrics
"""
fixtures_df = self.predict(games)
fixtures_df["winner"] = match_outcome(fixtures_df)
fixtures_df["rps"] = fixtures_df.apply(
lambda row: ranked_probability_score([
row["home_win_p"], row["draw_p"],
row["away_win_p"]], row["winner"]), axis=1)
return fixtures_df
def backtest(self, train_games, test_season, path='', save=True):
""" Test the model's accuracy on past/finished games by iteratively
training and testing on parts of the data.
Args:
train_games (pd.DataFrame): All the training samples
test_season (int): Season to use as the test set
path (string): Path extension to adjust to ipynb use
save (boolean): Save predictions to disk
Returns:
(float): Evaluation metric
"""
# Get training data
self.train_games = train_games
# Initialize model
self.__init__(
self.train_games[self.train_games['season'] != test_season],
performance=self.performance,
decay=self.decay)
# Initial train
self.fit()
# Get test data
# Separate testing based on per GW intervals
fixtures = (
pd.read_csv(
f"{path}data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] = (
pd.to_datetime(fixtures["kickoff_time"]).dt.date)
# Get only EPL games from the test season
self.test_games = (
self.train_games
.loc[self.train_games['league_id'] == 2411]
.loc[self.train_games['season'] == test_season]
.dropna()
)
self.test_games["kickoff_time"] = (
pd.to_datetime(self.test_games["date"]).dt.date)
# Merge on date
self.test_games = pd.merge(
self.test_games,
fixtures,
left_on='kickoff_time',
right_on='kickoff_time')
# Add the home team and away team index for running inference
self.test_games = (
pd.merge(
self.test_games,
self.teams,
left_on="team1",
right_on="team")
.rename(columns={"team_index": "hg"})
.drop(["team"], axis=1)
.drop_duplicates()
.merge(self.teams, left_on="team2", right_on="team")
.rename(columns={"team_index": "ag"})
.drop(["team"], axis=1)
.sort_values("date")
)
predictions = pd.DataFrame()  # API: pandas.DataFrame
""" Research results class """
import os
from collections import OrderedDict
import glob
import json
import dill
import pandas as pd
class Results:
""" Class for dealing with results of research
Parameters
----------
path : str
path to root folder of research
names : str, list or None
names of units (pipleines and functions) to load
variables : str, list or None
names of variables to load
iterations : int, list or None
iterations to load
repetition : int
index of repetition to load
configs, aliases : dict, Config, Option, Domain or None
configs to load
use_alias : bool
if True, use alias for model name, else use its full name.
Defaults to True
concat_config : bool
if True, concatenate all config options into one string and store
it in 'config' column, else use separate column for each option.
Defaults to False
drop_columns : bool
used only if `concat_config=True`. Whether to drop the columns with options and
leave only the concatenated config.
kwargs : dict
kwargs will be interpreted as config parameters
Returns
-------
pandas.DataFrame or dict
will have columns: iteration, name (of the pipeline/function)
and a column for the config. It will also have a column for each pipeline variable
and each function output that was saved as a result of the research.
**How to perform slicing**
Method `load` with default parameters will create a pandas.DataFrame with all dumped
parameters. To specify a subset of results one can define names of pipelines/functions,
their produced variables/outputs, iterations and configs. For example,
we have the following research:
```
domain = Option('layout', ['cna', 'can', 'acn']) * Option('model', [VGG7, VGG16])
research = (Research()
.add_pipeline(train_ppl, variables='loss', name='train')
.add_pipeline(test_ppl, name='test', execute=100, run=True, import_from='train')
.add_callable(accuracy, returns='accuracy', name='test_accuracy',
execute=100, pipeline='test')
.add_domain(domain))
research.run(n_iters=10000)
```
The code
```
Results(research=research).load(iterations=np.arange(5000, 10000),
variables='accuracy', names='test_accuracy',
configs=Option('layout', ['cna', 'can']))
```
will load output of ``accuracy`` function for configs
that contain layout 'cna' or 'can' for iterations starting with 5000.
The resulting dataframe will have columns 'iteration', 'name',
'accuracy', 'layout', 'model'. One can get the same in the following way:
```
results = Results(research=research).load()
results = results[(results.iterations >= 5000) &
(results.name == 'test_accuracy') & results.layout.isin(['cna', 'can'])]
```
"""
def __init__(self, path, *args, **kwargs):
self.path = path
self.description = self._get_description()
self.configs = None
self.df = self._load(*args, **kwargs)
def _get_list(self, value):
if not isinstance(value, list):
value = [value]
return value
def _sort_files(self, files, iterations):
files = {file: int(file.split('_')[-1]) for file in files}
files = OrderedDict(sorted(files.items(), key=lambda x: x[1]))
result = []
start = 0
iterations = [item for item in iterations if item is not None]
for name, end in files.items():
if len(iterations) == 0:
intersection = pd.np.arange(start, end)
else:
intersection = pd.np.intersect1d(iterations, pd.np.arange(start, end))
if len(intersection) > 0:
result.append((name, intersection))
start = end
return OrderedDict(result)
def _slice_file(self, dumped_file, iterations_to_load, variables):
iterations = dumped_file['iteration']
if len(iterations) > 0:
elements_to_load = pd.np.array([pd.np.isin(it, iterations_to_load) for it in iterations])
res = OrderedDict()
for variable in ['iteration', 'sample_index', *variables]:
if variable in dumped_file:
res[variable] = pd.np.array(dumped_file[variable])[elements_to_load]
else:
res = None
return res
def _concat(self, results, variables):
res = {key: [] for key in [*variables, 'iteration', 'sample_index']}
for chunk in results:
if chunk is not None:
for key, values in res.items():
if key in chunk:
values.extend(chunk[key])
return res
def _fix_length(self, chunk):
max_len = max([len(value) for value in chunk.values()])
for value in chunk.values():
if len(value) < max_len:
value.extend([pd.np.nan] * (max_len - len(value)))
def _filter_configs(self, config=None, alias=None, repetition=None):
result = None
if config is None and alias is None and repetition is None:
raise ValueError('At least one of parameters config, alias and repetition must be not None')
result = []
if repetition is not None:
repetition = {'repetition': repetition}
else:
repetition = dict()
if config is None and alias is None:
config = dict()
for supconfig in self.configs:
if config is not None:
config.update(repetition)
_config = supconfig.config()
if all(item in _config.items() for item in config.items()):
result.append(supconfig)
else:
_config = supconfig.alias()
alias.update(repetition)
if all(item in _config.items() for item in alias.items()):
result.append(supconfig)
self.configs = result
def _get_description(self):
with open(os.path.join(self.path, 'description', 'research.json'), 'r') as file:
return json.load(file)
def _load(self, names=None, variables=None, iterations=None, repetition=None, sample_index=None,
configs=None, aliases=None, use_alias=True, concat_config=False, drop_columns=True, **kwargs):
self.configs = []
for filename in glob.glob(os.path.join(self.path, 'configs', '*')):
with open(filename, 'rb') as f:
self.configs.append(dill.load(f))
if len(kwargs) > 0:
if configs is None:
configs = kwargs
else:
configs.update(kwargs)
if configs is not None:
self._filter_configs(config=configs, repetition=repetition)
elif aliases is not None:
self._filter_configs(alias=aliases, repetition=repetition)
elif repetition is not None:
self._filter_configs(repetition=repetition)
if names is None:
names = list(self.description['executables'].keys())
if variables is None:
variables = [variable
for unit in self.description['executables'].values()
for variable in unit['variables']
]
names = self._get_list(names)
variables = self._get_list(variables)
iterations = self._get_list(iterations)
all_results = []
for config_alias in self.configs:
alias_str = config_alias.alias(as_string=True)
_repetition = config_alias.pop_config('repetition')
_update = config_alias.pop_config('update')
path = os.path.join(self.path, 'results', alias_str)
for unit in names:
sample_folders = glob.glob(os.path.join(glob.escape(path), sample_index or '*'))
for sample_folder in sample_folders:
files = glob.glob(glob.escape(os.path.join(sample_folder, unit)) + '_[0-9]*')
files = self._sort_files(files, iterations)
if len(files) != 0:
res = []
for filename, iterations_to_load in files.items():
with open(filename, 'rb') as file:
res.append(self._slice_file(dill.load(file), iterations_to_load, variables))
res = self._concat(res, variables)
self._fix_length(res)
config_alias.pop_config('_dummy')
if concat_config:
res['config'] = config_alias.alias(as_string=True)
if use_alias:
if not concat_config or not drop_columns:
res.update(config_alias.alias(as_string=False))
else:
res.update(config_alias.config())
res.update({'repetition': _repetition.config()['repetition']})
res.update({'update': _update.config()['update']})
all_results.append(
pd.DataFrame({
'name': unit,
**res
})
)
return pd.concat(all_results, sort=False)  # API: pandas.concat
# This script runs expanded econometric models using both old and new data
# Import required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/demoforestation_differenced_spatial.csv', encoding='cp1252')  # API: pandas.read_csv
'''
This class automates the process of exporting data from a CSV file to a database
'''
import pandas as pd
from pathlib import Path
import re
import numpy as np
class DataExportManager:
@staticmethod
def exportAttributes(MyConnection):
base_path = Path(__file__).parent
file_path = (base_path / "data_csv/datosAtributosCsv.csv").resolve()
header=['TAGS','IDAG','ATRIBUTO GENERAL','IDAE','ATRIBUTO ESPECIFICO']
dfAttributes = pd.read_csv(file_path, header=0, names=header, encoding='utf-8')
for index, row in dfAttributes.iterrows():
if(not MyConnection.addAtribute(row[2],row[4])):
print('Error')
break
print('atributo: ',index,' ok')
dfAttributes.to_csv(file_path, encoding="utf-8", index=False)
print('::ok::Atributos exportados...')
return "ok"
@staticmethod
def exportAutos(MyConnection):
base_path = Path(__file__).parent
file_path = (base_path / "data_csv/autos_data_mod_csv.csv").resolve()
dfAutos = pd.read_csv(file_path, encoding='utf-8')  # API: pandas.read_csv
#!/usr/bin/env python3
import pytest
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import math
import torch
from neuralprophet import NeuralProphet, set_random_seed
from neuralprophet import df_utils
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "tests", "test-data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
NROWS = 256
EPOCHS = 2
BATCH_SIZE = 64
LR = 1.0
PLOT = False
def test_names():
log.info("testing: names")
m = NeuralProphet()
m._validate_column_name("hello_friend")
def test_train_eval_test():
log.info("testing: Train Eval Test")
m = NeuralProphet(
n_lags=10,
n_forecasts=3,
ar_sparsity=0.1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
df = m._handle_missing_data(df, freq="D", predicting=False)
df_train, df_test = m.split_df(df, freq="D", valid_p=0.1)
metrics = m.fit(df_train, freq="D", validation_df=df_test)
val_metrics = m.test(df_test)
log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_df_utils_func():
log.info("testing: df_utils Test")
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
# test find_time_threshold
df_dict, _ = df_utils.prep_copy_df_dict(df)
time_threshold = df_utils.find_time_threshold(df_dict, n_lags=2, valid_p=0.2, inputs_overbleed=True)
df_train, df_val = df_utils.split_considering_timestamp(
df_dict, n_lags=2, n_forecasts=2, inputs_overbleed=True, threshold_time_stamp=time_threshold
)
# init data params with a list
global_data_params = df_utils.init_data_params(df_dict, normalize="soft")
global_data_params = df_utils.init_data_params(df_dict, normalize="soft1")
global_data_params = df_utils.init_data_params(df_dict, normalize="standardize")
log.debug("Time Threshold: \n {}".format(time_threshold))
log.debug("Df_train: \n {}".format(type(df_train)))
log.debug("Df_val: \n {}".format(type(df_val)))
def test_trend():
log.info("testing: Trend")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
growth="linear",
n_changepoints=10,
changepoints_range=0.9,
trend_reg=1,
trend_reg_threshold=False,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_custom_changepoints():
log.info("testing: Custom Changepoints")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
dates = df["ds"][range(1, len(df) - 1, int(len(df) / 5.0))]
dates_list = [str(d) for d in dates]
dates_array = pd.to_datetime(dates_list).values
log.debug("dates: {}".format(dates))
log.debug("dates_list: {}".format(dates_list))
log.debug("dates_array: {} {}".format(dates_array.dtype, dates_array))
for cp in [dates_list, dates_array]:
m = NeuralProphet(
changepoints=cp,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_no_trend():
log.info("testing: No-Trend")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
growth="off",
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_seasons():
log.info("testing: Seasonality: additive")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="additive",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("SUM of yearly season params: {}".format(sum(abs(m.model.season_params["yearly"].data.numpy()))))
log.debug("SUM of weekly season params: {}".format(sum(abs(m.model.season_params["weekly"].data.numpy()))))
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
log.info("testing: Seasonality: multiplicative")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
# m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
def test_custom_seasons():
log.info("testing: Custom Seasonality")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
other_seasons = False
m = NeuralProphet(
yearly_seasonality=other_seasons,
weekly_seasonality=other_seasons,
daily_seasonality=other_seasons,
seasonality_mode="additive",
# seasonality_mode="multiplicative",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_seasonality(name="quarterly", period=90, fourier_order=5)
log.debug("seasonalities: {}".format(m.season_config.periods))
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar():
log.info("testing: AR")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=7,
yearly_seasonality=False,
epochs=EPOCHS,
# batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_sparse():
log.info("testing: AR (sparse)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=3,
n_lags=14,
ar_sparsity=0.5,
yearly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_deep():
log.info("testing: AR-Net (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg():
log.info("testing: Lagged Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=2,
n_lags=3,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
m = m.add_lagged_regressor(names="A")
m = m.add_lagged_regressor(names="B", only_last_value=True)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=10)
forecast = m.predict(future)
if PLOT:
print(forecast.to_string())
m.plot_last_forecast(forecast, include_previous_forecasts=5)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg_deep():
log.info("testing: List of Lagged Regressors (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=1,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(15, min_periods=1).mean()
df["C"] = df["y"].rolling(30, min_periods=1).mean()
cols = [col for col in df.columns if col not in ["ds", "y"]]
m = m.add_lagged_regressor(names=cols)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
forecast = m.predict(df)
if PLOT:
# print(forecast.to_string())
# m.plot_last_forecast(forecast, include_previous_forecasts=10)
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_events():
log.info("testing: Events")
df = pd.read_csv(PEYTON_FILE)[-NROWS:]
playoffs = pd.DataFrame(
{
"event": "playoff",
"ds": pd.to_datetime(
[
"2008-01-13",
"2009-01-03",
"2010-01-16",
"2010-01-24",
"2010-02-07",
"2011-01-08",
"2013-01-12",
"2014-01-12",
"2014-01-19",
"2014-02-02",
"2015-01-11",
"2016-01-17",
"2016-01-24",
"2016-02-07",
]
),
}
)
superbowls = pd.DataFrame(
{
"event": "superbowl",
"ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
}
)
events_df = pd.concat((playoffs, superbowls))
m = NeuralProphet(
n_lags=2,
n_forecasts=30,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# set event windows
m = m.add_events(
["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
)
# add the country specific holidays
m = m.add_country_holidays("US", mode="additive", regularization=0.5)
m.add_country_holidays("Indonesia")
m.add_country_holidays("Thailand")
m.add_country_holidays("Philippines")
m.add_country_holidays("Pakistan")
m.add_country_holidays("Belarus")
history_df = m.create_df_with_events(df, events_df)
metrics_df = m.fit(history_df, freq="D")
future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
forecast = m.predict(df=future)
log.debug("Event Parameters:: {}".format(m.model.event_params))
if PLOT:
m.plot_components(forecast)
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_future_reg():
log.info("testing: Future Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)  # API: pandas.read_csv
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import torch
import pandas as pd
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, cmap=plt.cm.YlOrBr):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
(Adapted from scikit-learn docs).
"""
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', origin='lower', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# Show all ticks
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# Label with respective list entries
xticklabels=classes, yticklabels=classes,
ylabel='True label',
xlabel='Predicted label')
# Set alignment of tick labels
plt.setp(ax.get_xticklabels(), rotation=0, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
return fig, ax
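# Usage sketch (the labels below are made-up binary predictions, purely illustrative):
# fig, ax = plot_confusion_matrix(
#     y_true=[0, 1, 1, 0, 1],
#     y_pred=[0, 1, 0, 0, 1],
#     classes=["negative", "positive"],
#     normalize=True)
# fig.savefig("confusion_matrix.png")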
# visualize accuracy and loss graph
def visualize_graph(train_losses, train_acc, test_losses, test_acc):
fig, axs = plt.subplots(2,2,figsize=(15,10))
axs[0, 0].plot(train_losses)
axs[0, 0].set_title("Training Loss")
axs[1, 0].plot(train_acc)
axs[1, 0].set_title("Training Accuracy")
axs[0, 1].plot(test_losses)
axs[0, 1].set_title("Test Loss")
axs[1, 1].plot(test_acc)
axs[1, 1].set_title("Test Accuracy")
def visualize_save_train_vs_test_graph(EPOCHS, dict_list, title, xlabel, ylabel, PATH, name="fig"):
plt.figure(figsize=(20,10))
#epochs = range(1,EPOCHS+1)
for label, item in dict_list.items():
x = np.linspace(1, EPOCHS+1, len(item))
plt.plot(x, item, label=label)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
plt.savefig(PATH+"/"+name+".png")
def evaluation_pred(model, iterator, itos=None, tokenizer=None):
# deactivating dropout layers
model.eval()
if itos is not None:
eval_df = pd.DataFrame(columns=['src','trg','pred'])
else:
eval_df = pd.DataFrame(columns=['trg','pred'])
# deactivates autograd
with torch.no_grad():
for batch in iterator:
# retrieve text and no. of words
text, text_lengths = batch.src
label = batch.trg.cpu().numpy()
# convert to 1D tensor
predictions = model(text, text_lengths)
top_pred = predictions.argmax(1, keepdim = True).cpu().numpy()
batch_df = pd.DataFrame(top_pred, columns=['pred'])  # API: pandas.DataFrame
import argparse
import numpy as np
import pandas as pd
import os
import sys
import time
from lightgbm import LGBMClassifier
from sklearn.preprocessing import LabelEncoder
import cleanlab
from cleanlab.pruning import get_noise_indices
model = 'clean_embed_all-mpnet-base-v2.csv'
df = pd.read_csv('/global/project/hpcg1614_shared/ca/data/banking77/{}'.format(model))
df_orig = pd.read_csv('clean.csv')  # API: pandas.read_csv
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
import talib
except:
print('PLEASE install TALIB to call these methods')
import pandas as pd
def CMO(Series, timeperiod=14):
res = talib.CMO(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def BBANDS(Series, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
up, middle, low = talib.BBANDS(
Series.values, timeperiod, nbdevup, nbdevdn, matype)
return pd.Series(up, index=Series.index), pd.Series(middle, index=Series.index), pd.Series(low, index=Series.index)
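# Usage sketch (assumes `close` is a pandas Series of closing prices):
# upper, middle, lower = BBANDS(close, timeperiod=20)
# momentum = CMO(close, timeperiod=14)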
def BETA(SeriesA, SeriesB, timeperiod=5):
res = talib.BETA(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)  # API: pandas.Series
import os, re, getopt, sys
import numpy as np
import pandas as pd
from matplotlib import pyplot
from pathlib import Path
#####################################################################################
## small utils
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
def shortenGraphName(arg):
if not isinstance(arg, str):
return arg
name = basename(arg)
return re.sub("___symmetrized-sorted", "", name)
def basename(arg):
"""Try to extract a basename from the provided argument."""
if not isinstance(arg, str):
return arg
try:
name = os.path.basename(arg)
except:
name = arg
fileExtensions = ['el', 'txt', 'dat', 'csv', 'exe', 'out']
for ext in fileExtensions:
name = re.sub(r'\.'+'{}$'.format(ext), '', name)
name = re.sub(r'\.', '_', name)
return name
def readData(path, extractBasenames=True, shortenGraphNames=True):
data = pd.read_csv(path, sep=" ", header=None)
if extractBasenames:
data = data.applymap(basename)
if shortenGraphNames:
data = data.applymap(shortenGraphName)
return data
def savefig(fig, path):
dirc = os.path.dirname(path)
if not (os.path.exists(dirc) and os.path.isdir(dirc)):
os.makedirs(dirc)
fig.savefig(path)
#####################################################################################
## plotting stuff
def SetPueschelStyle(axes):
"""Set a graph style like prof. pueschel.
Style includes a grey background, a horizontal grid with
white lines."""
if isinstance(axes, pyplot.Figure):
axes = axes.gca()
if not isinstance(axes, pyplot.Axes):
raise ValueError("axes must be pyplot.Figure or pyplot.Axes")
axes.set_facecolor('xkcd:light grey')
for spine in ['top', 'right', 'bottom', 'left']:
axes.spines[spine].set_visible(False)
axes.grid(which='major', axis='y', color='w',
linestyle='-', linewidth=2)
axes.tick_params(axis='y', length=0)
def PlotRuntime(threads, time, figure=None, title_suffix=None, pltkwargs=dict(), **kwargs):
data = pd.DataFrame()
data['threads'] = threads
data['trial_time'] = time
if figure == None:
figure = pyplot.figure()
ax = figure.gca()
pooling = 'mean'
if "pooling" in kwargs:
pooling = kwargs["pooling"]
if pooling == 'mean':
algTime = data.groupby(['threads']).mean()
elif pooling == 'median':
algTime = data.groupby(['threads']).median()
else:
raise ValueError("unknown pooling strategy")
ax.plot(algTime.index, algTime['trial_time'], **pltkwargs)
ax.set_xlabel('# threads')
ax.set_ylabel('Runtime [s]')
if title_suffix == None:
ax.set_title('Runtime')
else:
ax.set_title('Runtime [{}]'.format(title_suffix))
return figure, ax
def PlotSpeedup(threads, time, figure=None, title_suffix=None, pltkwargs=dict(), **kwargs):
data = pd.DataFrame()
data['threads'] = threads
data['trial_time'] = time
if figure == None:
figure = pyplot.figure()
ax = figure.gca()
pooling = 'mean'
if "pooling" in kwargs:
pooling = kwargs["pooling"]
if pooling == 'mean':
algTime = data.groupby(['threads']).mean()
elif pooling == 'median':
algTime = data.groupby(['threads']).median()
else:
raise ValueError("unknown pooling strategy")
baseline = algTime.iloc[0]['trial_time']
algTime['speedup'] = baseline/algTime['trial_time']
ax.plot(algTime.index, algTime['speedup'], **pltkwargs)
ax.set_xlabel('# threads')
ax.set_ylabel('Speedup [1]')
if title_suffix == None:
ax.set_title('Speedup')
else:
ax.set_title('Speedup [{}]'.format(title_suffix))
return figure, ax
def PlotPreprocessingInfo(pp_method, pp_time, time, no_pp_method, threads=None,
title_suffix = None, pltkwargs=dict(), **kwargs):
data = pd.DataFrame()  # API: pandas.DataFrame
# Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software.
"""This module contains utilities for loading and saving SampleSet data files."""
import copy
import logging
import os
import pathlib
import pickle
import re
import warnings
import pandas as pd
from riid import DATA_PATH, DataDirectoryNotFoundError
from riid.sampleset import SampleSet
def _check_data_path():
if not os.path.exists(DATA_PATH):
raise DataDirectoryNotFoundError()
def check_iso_name(name: str):
""" Validates whether or not the given string contains
a properly formatted radioisotope name.
Note that this function does NOT look up a string
to determine if the string corresponds to a
radioisotope that actually exists, it just checks
the format.
The regular expression used by this function
looks for the following (in order):
- 1 capital letter
- 0 to 1 lowercase letters
- 1 to 3 numbers
- an optional "m" for metastable
Examples of properly formatted isotope names:
- Y88
- Ba133
- Ho166m
Args:
name: the string to be checked
Returns:
True if the string has a valid format,
otherwise False.
"""
validator = re.compile(r"^[A-Z]{1}[a-z]{0,1}[0-9]{1,3}m?$")
other_valid_names = ["fiestaware"]
match = validator.match(name)
is_valid = match is not None or \
name.lower() in other_valid_names
return is_valid
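# For example (illustrative):
# check_iso_name("Ba133") -> True (capital letter, optional lowercase letter, digits)
# check_iso_name("ba133") -> False (must start with a capital letter)
# check_iso_name("fiestaware") -> True (explicitly whitelisted special case)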
def load_samples_from_file(file_path: str, verbose=1) -> SampleSet:
"""Load samples from the given file_path."""
ss = None
if os.path.isfile(file_path):
message = "Found samples, loading '{}'.".format(file_path)
if verbose:
logging.info(message)
try:
raw_ss = read_hdf(file_path)
except OSError:
with open(file_path, "rb") as fin:
raw_ss = pickle.load(fin)
kwargs = raw_ss.__dict__
# Make instance of most recent SampleSet using data from loaded file.
ss = SampleSet(**kwargs)
else:
message = "No samples were found for the given parameters."
if verbose:
logging.info(message)
ss = None
return ss
def load_samples(detector: str, measured_or_synthetic: str, train_or_test: str, file_name: str,
verbose=1) -> SampleSet:
"""Load samples for a given detector, type, and file name.
"""
file_path = os.path.join(DATA_PATH, detector, measured_or_synthetic, train_or_test, file_name)
file_path = os.path.expanduser(file_path)
ss = load_samples_from_file(file_path, verbose=verbose)
return ss
def save_samples_to_file(ss: SampleSet, file_path: str, verbose=1):
"""Writes out the given sampleset to disk at the given path."""
try:
write_hdf(ss, file_path)
except OSError:
with open(file_path, "wb") as fout:
pickle.dump(ss, fout)
if verbose:
logging.info(f"Saved SampleSet to '{file_path}'")
def save_samples(ss: SampleSet, file_name: str, detector: str = None,
measured_or_synthetic: str = None, purpose: str = None, verbose=1):
"""Save the given samples to the appropriate data directory location."""
_check_data_path()
if not ss:
raise EmptySampleSetError("No samples were provided")
if detector:
ss.detector = detector
if measured_or_synthetic:
ss.measured_or_synthetic = measured_or_synthetic
if purpose:
ss.purpose = purpose
output_dir = os.path.join(
DATA_PATH,
ss.detector,
ss.measured_or_synthetic,
ss.purpose
)
output_dir = os.path.expanduser(output_dir)
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
output_path = os.path.join(
DATA_PATH,
ss.detector,
ss.measured_or_synthetic,
ss.purpose,
file_name
)
save_samples_to_file(ss, output_path, verbose)
def save_detector_model(model_contents: str, model_hash: str, detector_name: str):
"""Save detector model
"""
output_fn = os.path.join(
DATA_PATH,
detector_name,
f"{model_hash}.dat"
)
with open(output_fn, "w") as fout:
fout.write(model_contents)
def load_seeds(detector: str, measured_or_synthetic: str, file_name: str, verbose=1) -> SampleSet:
"""Load seeds for a given detector, type, and file_name.
"""
_check_data_path()
load_path = os.path.join(DATA_PATH, detector, measured_or_synthetic, "seed", file_name)
load_path = os.path.abspath(os.path.expanduser(load_path))
if not os.path.isfile(load_path):
message = "No seeds were found for the given configuration at path {}".format(load_path)
raise NoSeedsFoundError(message)
if verbose:
message = "Found seeds, loading '{}'.".format(load_path)
logging.info(message)
try:
raw_seeds = read_hdf(load_path)
except OSError:
raw_seeds = pickle.load(open(load_path, "rb"))
ss = SampleSet(**raw_seeds.__dict__)
return ss
def save_seeds(ss: SampleSet, file_name: str, detector: str = None,
measured_or_synthetic: str = None, verbose=1):
"""Load seeds for a given detector, type, and file_name."""
_check_data_path()
if not ss:
raise EmptySampleSetError("No seeds were provided.")
if detector:
ss.detector = detector
if measured_or_synthetic:
ss.measured_or_synthetic = measured_or_synthetic
save_samples(ss, file_name, verbose=verbose)
def read_hdf(file_name: str) -> SampleSet:
""" Reads sampleset class from hdf binary format."""
spectra = pd.read_hdf(file_name, "spectra")
try:
collection_information = pd.read_hdf(file_name, "collection_information")
except:
collection_information = pd.read_hdf(file_name, "info")
sources = pd.read_hdf(file_name, "sources")
sources.columns = [i.split("___")[0] for i in sources.columns]
features = pd.read_hdf(file_name, "features")  # API: pandas.read_hdf
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import glob
import os
import sys
import datetime
import urllib.request
import sys
from sklearn import datasets, linear_model
import csv
from scipy import stats
import pylab
Calculated_GDD=[]
df = pd.DataFrame()
df2 = pd.DataFrame()  # API: pandas.DataFrame
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn.metrics import classification_report, accuracy_score # calculating measures for accuracy assessment
from osgeo import gdal
import joblib
import sys
# sys.path.append(r"F:\Work\Maptor\venv\Model")
from ReportModule import ReportModule
import pandas as pd
import matplotlib.pyplot as plt
class ClassificationModel():
Trees=500
def set_trees(self,trees):
self.Trees = trees
def get_trees(self):
return self.Trees
def rf_classifier(self,img,img_ds,roi,trees):
try:
#rt = ReportModule()
n_samples = (roi > 0).sum()
#print('We have {n} training samples'.format(n=n_samples))
# What are our classification labels?
labels = np.unique(roi[roi > 0])
#print('The training data include {n} classes: {classes}'.format(n=labels.size,
# classes=labels))
X = img[roi > 0, :]
y = roi[roi > 0]
# print('Our X matrix is sized: {sz}'.format(sz=X.shape))
#
# print('Our y array is sized: {sz}'.format(sz=y.shape))
# Train Random Forest
rf = RandomForestClassifier(n_estimators=trees, oob_score=True,verbose=2, n_jobs=-1 )
X = np.nan_to_num(X)
rf = rf.fit(X, y)
ob_score = round(rf.oob_score_ * 100, 2)
importance = {}
bands = range(1, img_ds.RasterCount + 1)
for b, imp in zip(bands, rf.feature_importances_):
importance[b] = round(imp * 100, 2)
#print('Band {b} importance: {imp} %'.format(b=b, imp=round(imp * 100, 2)))
# Let's look at a crosstabulation to see the class confusion.
# To do so, we will import the Pandas library for some help:
# Setup a dataframe -- just like R
# Exception Handling because of possible Memory Error
try:
df = pd.DataFrame()  # API: pandas.DataFrame
#%%%%%%%%%%%%%%%%%%%%%%% Prepare for testing %%%%%%%%%%%%%%
import os
import backtest_pkg.backtest_portfolio as bt
import pandas as pd
from IPython.display import display
import importlib
os.chdir(r'M:\Share\Colleagues\Andy\Python Project\Backtest Module')
price_data = pd.read_csv('pkg_test/Adjusted_Price.csv', index_col=0, parse_dates=True)
#########################################################################
######## Portfolio construction ###############
#########################################################################
#%%%%%%%%%%%%%%%%%%%%%%% Portfolio from weight %%%%%%%%%%%%%%%
importlib.reload(bt)
# Small testing date:
small_price_data = price_data.iloc[:10, :5]
small_weight = pd.DataFrame(data=[[1, 2, 3]], index=[price_data.index[0]], columns=price_data.columns[:3])
small_share = pd.DataFrame(data=1, index=[price_data.index[0]], columns=price_data.columns[:3])
# Initiate a portfolio by weight:
small_port = bt.portfolio(small_weight, name='Small Portfolio')
# Better way to initialize a portfolio:
# small_port = bt.portfolio(weight = small_weight, name='Small Portfolio')
small_port.set_price(small_price_data)
display(small_port.weight)
##### Check some properties:
# End date:
small_port.end_date = None # Default: last date of price data
print(f'End date (default): {small_port.end_date:%Y-%m-%d}')
small_port.end_date = pd.datetime(2013, 1, 10) # Manually set
print(f'End date (set): {small_port.end_date:%Y-%m-%d}')
# Trading_status:
small_port.trading_status = None # Default: TRUE if price is available
display(f'Trading Status (default):')
display(small_port.trading_status)
trading_status = small_price_data.notna()
trading_status.loc[:'2013-01-10', 'ALB SQ Equity'] = False
small_port.trading_status = trading_status # Set for specific requirement
display(f'Trading Status (set):')
display(small_port.trading_status)
# Daily return: calculate from price, the first available date has return 1.
display(f'Daily Return:')
display(small_port.daily_ret)
#%%%%%%%%%%%%%%%%%% Portfolio from share %%%%%%%%%%%%%%%%%%%%%
# Initiate a portfolio by share:
small_port = bt.portfolio(share = small_share, name='Small share Portfolio')
small_port.set_price(small_price_data)
display(small_port.weight)
##### Check some properties:
# End date:
small_port.end_date = None # Default: last date of price data
print(f'End date (default): {small_port.end_date:%Y-%m-%d}')
small_port.end_date = pd.datetime(2013, 1, 10) # Manually set
print(f'End date (set): {small_port.end_date:%Y-%m-%d}')
# Trading_status:
small_port.trading_status = None # Default: TRUE if price is available
display(f'Trading Status (default):')
display(small_port.trading_status)
trading_status = small_price_data.notna()
trading_status.loc[:'2013-01-10', 'ALB SQ Equity'] = False
small_port.trading_status = trading_status # Set for specific requirement
display(f'Trading Status (set):')
display(small_port.trading_status)
# Daily return: calculate from price, the first available date has return 1.
display(f'Daily Return:')
display(small_port.daily_ret)
##################### Error handling ##############################
importlib.reload(bt)
#%%%%%%%%%%%%%%%%%%%% Weight in untradable security %%%%%%%%%%%%%%%%%
small_weight = pd.DataFrame(data=[[1, 2, 3]], index=[small_price_data.index[0]], columns= small_price_data.columns[-3:])
small_share = pd.DataFrame(data=1, index=[small_price_data.index[0]], columns=small_price_data.columns[-3:])
small_port = bt.portfolio(small_weight, name='Small error Portfolio')
small_port.set_price(small_price_data)
display(small_weight)
display(small_port.weight)
#%%%%%%%%%%%%%%%%%%% Share in untradable security %%%%%%%%%%%%%%%%%%%%%
small_port = bt.portfolio(share=small_share, name='Small error Portfolio from share')
small_port.set_price(small_price_data)
display(small_share)
display(small_port.weight)
#%%%%%%%%%%%%%%%%%%%%%% Unknown tickers %%%%%%%%%%%%%%%%%
small_weight = pd.DataFrame(data=[[1, 2, 3]], index=[small_price_data.index[0]], columns= small_price_data.columns[:3])
small_weight['Strange Equity']=1
small_port = bt.portfolio(small_weight, name='Small Portfolio with unknown ticker')
small_port.set_price(small_price_data)
display('Input weight:')
display(small_weight)
display('Output weight:')
display(small_port.weight)
#%%%%%%%%%%%%%%%%%%%%%%% Outrange date %%%%%%%%%%%%%%%%%%%%%%%%
small_weight = pd.DataFrame(data=[[1, 2, 3]], index=[small_price_data.index[0]], columns= small_price_data.columns[:3])
small_weight.loc[pd.to_datetime('2019-01-01'), :] = 1
small_port = bt.portfolio(small_weight, name='Small Portfolio with unknown ticker')
small_port.set_price(small_price_data)
display('Input weight:')
display(small_weight)
display('Final weight:')
display(small_port.weight)
#%%%%%%%%%%%%%%%%%%%%%% Unknown tickers in share %%%%%%%%%%%%%%%%%
small_share = pd.DataFrame(data=1, index=[small_price_data.index[0]], columns= small_price_data.columns[:3])
small_share['Strange Equity']=1
small_port = bt.portfolio(share = small_share, name='Small Portfolio with unknown ticker')
small_port.set_price(small_price_data)
display('Input weight:')
display(small_share)
display('Final weight:')
display(small_port.weight)
#%%%%%%%%%%%%%%%%%%%%%% Outrange date in share %%%%%%%%%%%%%%%%%
small_share = pd.DataFrame(data=1, index=[small_price_data.index[0]], columns= small_price_data.columns[:3])
small_share.loc[pd.to_datetime('2019-01-01'), :]=1
small_port = bt.portfolio(share = small_share, name='Small Portfolio with unknown ticker')
small_port.set_price(small_price_data)
display('Input weight:')
display(small_share)
display('Final weight:')
display(small_port.weight)
########################################################################
################## Backtesting ###############
########################################################################
#%%%%%%%%%%%%%% Construct a small portfolio %%%%%%%%%%%%%%
importlib.reload(bt)
small_price_data = price_data.iloc[:10, :5]
# Initiate an equal weight portfolio by weight:
small_weight = pd.DataFrame(data=1, index=[small_price_data.index[0]], columns=small_price_data.columns)
small_port = bt.portfolio(small_weight, name='Small Portfolio')
small_port.set_price(small_price_data)
#%%%%%%%%%%%%%%%%%%%%% Simple drift %%%%%%%%%%%%%%%%%%%%
drifting_weight = small_port._drift_weight(small_port.weight)
display('Simple drift')
display(drifting_weight)
#%%%%%%%%%%%%%%%%%%%% Rebalanced drift %%%%%%%%%%%%%%%%%
rebalance_date = pd.datetime(2013, 1, 10)
rebalanced_weight = pd.DataFrame(data=1, index=[rebalance_date], columns=small_price_data.columns)  # API: pandas.DataFrame
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execition(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = | pd.Series(['1.0', 2, -3, '2.0']) | pandas.Series |
import pandas as pd
def create_script(func_str, output_script_file_path=r"./machine_induced_script.py"):
output_script = \
"""import sys
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
log_lines = open(input_file_path, "r").readlines()
{}
open(output_file_path, "w").write("\\n".join(output_list))
print(\"done writing\", output_file_path)""".format(func_str)
open(output_script_file_path, "w").write(output_script)
def get_all_substrings(input_string):
length = len(input_string)
return set([input_string[i:j+1] for i in range(length) for j in range(i,length)])
def get_intersection_of_list_of_sets(list_of_sets):
set_0 = list_of_sets[0]
for a_set in list_of_sets[1:]:
set_0 = set_0.intersection(a_set)
return set_0
def get_max_length_common_string(string_1, string_2):
union_list = [substring for substring in get_all_substrings(string_1) if substring in string_2]
common_string = union_list[ | pd.Series(union_list) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#RIL_EC Scenarios
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#time frame of the analysis (years)
tf = 201

#parameters of the AGB decomposition curve used below. Source: Khasanah et al. (2015)
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
#df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
t = range(0,tf,1)
c_firewood_energy_S1 = df1['Firewood_other_energy_use'].values
#c_loss_S2 = df2['C_loss'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S1
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_S1(t,remainAGB_S1):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1
#set zero matrix
output_decomp_S1 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1[i:,i] = decomp_S1(t[:len(t)-i],remain_part_S1)
print(output_decomp_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1[:,i] = np.diff(output_decomp_S1[:,i])
i = i + 1
print(subs_matrix_S1[:,:4])
print(len(subs_matrix_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1 = subs_matrix_S1.clip(max=0)
print(subs_matrix_S1[:,:4])
#make the results as absolute values
subs_matrix_S1 = abs(subs_matrix_S1)
print(subs_matrix_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1)
subs_matrix_S1 = np.vstack((zero_matrix_S1, subs_matrix_S1))
print(subs_matrix_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1 = (tf,1)
decomp_tot_S1 = np.zeros(matrix_tot_S1)
i = 0
while i < tf:
decomp_tot_S1[:,0] = decomp_tot_S1[:,0] + subs_matrix_S1[:,i]
i = i + 1
print(decomp_tot_S1[:,0])
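#Hedged illustration (not part of the original workflow): the decomposition bookkeeping
#above is repeated verbatim for every scenario below, so the same logic can be wrapped in
#one helper. The assert only checks that this sketch reproduces decomp_tot_S1 for the S1 sheet.
def yearly_decomp_emissions(remain_agb, t, a, b):
    n = len(t)
    out = np.zeros((n, len(remain_agb)))
    for i, part in enumerate(remain_agb):
        # remaining AGB of cohort i, decaying from its harvest year onward
        out[i:, i] = (1 - (1 - np.exp(-a * t[:n - i]))**b) * part
    # year-on-year differences, keep only the losses, pad year 0 with zeros
    subs = np.abs(np.diff(out, axis=0).clip(max=0))
    subs = np.vstack((np.zeros((1, len(remain_agb))), subs))
    return subs.sum(axis=1)

assert np.allclose(yearly_decomp_emissions(df['C_remainAGB'].values, t, a, b), decomp_tot_S1[:, 0])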
#S1_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1')
tf = 201
t = np.arange(tf)
def decomp_S1_C(t,remainAGB_S1_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_C
#set zero matrix
output_decomp_S1_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1_C[i:,i] = decomp_S1_C(t[:len(t)-i],remain_part_S1_C)
print(output_decomp_S1_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1_C[:,i] = np.diff(output_decomp_S1_C[:,i])
i = i + 1
print(subs_matrix_S1_C[:,:4])
print(len(subs_matrix_S1_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_C = subs_matrix_S1_C.clip(max=0)
print(subs_matrix_S1_C[:,:4])
#make the results as absolute values
subs_matrix_S1_C = abs(subs_matrix_S1_C)
print(subs_matrix_S1_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_C)
subs_matrix_S1_C = np.vstack((zero_matrix_S1_C, subs_matrix_S1_C))
print(subs_matrix_S1_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_C = (tf,1)
decomp_tot_S1_C = np.zeros(matrix_tot_S1_C)
i = 0
while i < tf:
decomp_tot_S1_C[:,0] = decomp_tot_S1_C[:,0] + subs_matrix_S1_C[:,i]
i = i + 1
print(decomp_tot_S1_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_E(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E)
subs_matrix_E = np.vstack((zero_matrix_E, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_E')
tf = 201
t = np.arange(tf)
def decomp_E_C(t,remainAGB_E_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E_C
#set zero matrix
output_decomp_E_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E_C[i:,i] = decomp_E_C(t[:len(t)-i],remain_part_E_C)
print(output_decomp_E_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E_C[:,i] = np.diff(output_decomp_E_C[:,i])
i = i + 1
print(subs_matrix_E_C[:,:4])
print(len(subs_matrix_E_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_C = subs_matrix_E_C.clip(max=0)
print(subs_matrix_E_C[:,:4])
#make the results as absolute values
subs_matrix_E_C = abs(subs_matrix_E_C)
print(subs_matrix_E_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_C)
subs_matrix_E_C = np.vstack((zero_matrix_E_C, subs_matrix_E_C))
print(subs_matrix_E_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_C = (tf,1)
decomp_tot_E_C = np.zeros(matrix_tot_E_C)
i = 0
while i < tf:
decomp_tot_E_C[:,0] = decomp_tot_E_C[:,0] + subs_matrix_E_C[:,i]
i = i + 1
print(decomp_tot_E_C[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_S1,label='S1')
#plt.plot(t,decomp_tot_S2,label='S2')
plt.plot(t,decomp_tot_E,label='E')
plt.xlim(0,200)
plt.legend(loc='best', frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
#product lifetime
#building materials
B = 35
TestDSM1 = DynamicStockModel(t = df1['Year'].values, i = df1['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1, ExitFlag1 = TestDSM1.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()
Stock_by_cohort1, ExitFlag1 = TestDSM1.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()
S1, ExitFlag1 = TestDSM1.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()
O_C1, ExitFlag1 = TestDSM1.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()
O1, ExitFlag1 = TestDSM1.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()
DS1, ExitFlag1 = TestDSM1.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()
Bal1, ExitFlag1 = TestDSM1.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()
#print output flow
print(TestDSM1.o)
#print(TestDSM2.o)
print(TestDSME.o)
plt.plot(t, TestDSM1.o)
plt.xlim(0,100)
plt.show()
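#Quick sanity check (illustration, not in the original script): the mass balance returned
#by check_stock_balance() should be numerically ~0 for both in-use wood stocks.
print('max |stock balance| S1:', np.abs(Bal1).max())
print('max |stock balance| E :', np.abs(BalE).max())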
#%%
#Step (5): Biomass growth
# RIL_Scenario biomass growth, following RIL disturbance
#recovery time, follow the one by Alice-guier
#H = [M, E, C_M, C_E]
#LD0 = [M, E, C_M, C_E]
H = [2.89, 4.34, 2.89, 4.34]
LD0 = [53.46-2.89, 53.46-4.34, 29.29-2.89, 29.29-4.34]
s = 1.106
#RIL
RT = ((H[0] + LD0[0])*100/initAGB)**s
print(RT)
#growth per year (Mg C/ha.yr)
gpy = (H[0] + LD0[0])/RT
print(gpy)
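#Illustration only (values not used downstream): recovery time and implied growth rate
#for every harvest-intensity entry in H / LD0, using the same relation as above.
for label, h, ld in zip(['M', 'E', 'C_M', 'C_E'], H, LD0):
    rt_i = ((h + ld) * 100 / initAGB)**s
    print(label, ': RT =', round(rt_i, 2), 'yr, growth =', round((h + ld) / rt_i, 3), 'Mg C/ha.yr')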
tf_RIL_S1 = 36
A1 = range(0,tf_RIL_S1,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_S1(A1):
return 44/12*1000*gpy*A1
seq_RIL = np.array([Y_RIL_S1(A1i) for A1i in A1])
print(len(seq_RIL))
print(seq_RIL)
##repeat the 36-year regrowth sequence of new AGB following logging (RIL) over the full time frame (6 repetitions, trimmed below)
counter_35y = range(0,6,1)
y_RIL = []
for i in counter_35y:
y_RIL.append(seq_RIL)
flat_list_RIL = []
for sublist in y_RIL:
for item in sublist:
flat_list_RIL.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL = flat_list_RIL[:len(flat_list_RIL)-15]
print(flat_list_RIL)
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL = [p - q for q, p in zip(flat_list_RIL, flat_list_RIL[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL = [0 if i < 0 else i for i in flat_list_RIL]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL = [ -x for x in flat_list_RIL]
print(flat_list_RIL)
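#Minimal toy illustration of the differencing pattern above: a cumulative regrowth curve
#is turned into yearly uptake (negative values), with no uptake across a replanting reset.
toy_cum = [0.0, 10.0, 20.0, 0.0, 10.0]
toy_yearly = [0.0] + [-(p - q) if p > q else 0.0 for q, p in zip(toy_cum, toy_cum[1:])]
print(toy_yearly)   # [0.0, -10.0, -10.0, 0.0, -10.0]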
#RIL_C
RT_C = ((H[2] + LD0[2])*100/initAGB)**s
print(RT_C)
#growth per year (Mg C/ha.yr)
gpy_C = (H[2] + LD0[2])/RT_C
print(gpy_C)
tf_RIL_C = 36
A1 = range(0,tf_RIL_C,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_C(A1):
return 44/12*1000*gpy_C*A1
seq_RIL_C = np.array([Y_RIL_C(A1i) for A1i in A1])
print(len(seq_RIL_C))
print(seq_RIL_C)
##repeat the 36-year regrowth sequence of new AGB following logging (RIL) over the full time frame (6 repetitions, trimmed below)
counter_35y = range(0,6,1)
y_RIL_C = []
for i in counter_35y:
y_RIL_C.append(seq_RIL_C)
flat_list_RIL_C = []
for sublist_C in y_RIL_C:
for item in sublist_C:
flat_list_RIL_C.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL_C = flat_list_RIL_C[:len(flat_list_RIL_C)-15]
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL_C, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL_C' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL_C = [p - q for q, p in zip(flat_list_RIL_C, flat_list_RIL_C[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL_C = [0 if i < 0 else i for i in flat_list_RIL_C]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL_C.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL_C = [ -x for x in flat_list_RIL_C]
print(flat_list_RIL_C)
#%%
#Step (5_1): Biomass C sequestration of the remaining unharvested block
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
df1_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
dfE_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_E')
t = range(0,tf,1)
RIL_seq_S1= df1['RIL_seq'].values
RIL_seq_C_S1 = df1_C['RIL_seq'].values
RIL_seq_E = dfE['RIL_seq'].values
RIL_seq_C_E = dfE_C['RIL_seq'].values
#%%
#Step (6): post-harvest processing of wood
#post-harvest wood processing
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
t = range(0,tf,1)
PH_Emissions_HWP1 = df1['PH_Emissions_HWP'].values
PH_Emissions_HWPE = dfE ['PH_Emissions_HWP'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
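#sanity check (illustration): with k = ln(2)/hl, half of the landfilled carbon remains after hl years
assert np.isclose(np.exp(-k * hl), 0.5)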
#S1
df1_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1(t,Landfill_decomp_CH4_S1):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_S1
#set zero matrix
output_decomp_CH4_S1 = np.zeros((len(t),len(df1_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1 in enumerate(df1_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1[i:,i] = decomp_CH4_S1(t[:len(t)-i],remain_part_CH4_S1)
print(output_decomp_CH4_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1 = np.zeros((len(t)-1,len(df1_CH4['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1[:,i] = np.diff(output_decomp_CH4_S1[:,i])
i = i + 1
print(subs_matrix_CH4_S1[:,:4])
print(len(subs_matrix_CH4_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1 = subs_matrix_CH4_S1.clip(max=0)
print(subs_matrix_CH4_S1[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1 = abs(subs_matrix_CH4_S1)
print(subs_matrix_CH4_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1 = np.zeros((len(t)-200,len(df1_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1)
subs_matrix_CH4_S1 = np.vstack((zero_matrix_CH4_S1, subs_matrix_CH4_S1))
print(subs_matrix_CH4_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1 = (tf,1)
decomp_tot_CH4_S1 = np.zeros(matrix_tot_CH4_S1)
i = 0
while i < tf:
decomp_tot_CH4_S1[:,0] = decomp_tot_CH4_S1[:,0] + subs_matrix_CH4_S1[:,i]
i = i + 1
print(decomp_tot_CH4_S1[:,0])
#E
dfE_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_CH4_E(t,Landfill_decomp_CH4_E):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_E
#set zero matrix
output_decomp_CH4_E = np.zeros((len(t),len(dfE_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_E in enumerate(dfE_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_E[i:,i] = decomp_CH4_E(t[:len(t)-i],remain_part_CH4_E)
print(output_decomp_CH4_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_E = np.zeros((len(t)-1,len(dfE_CH4['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_E[:,i] = np.diff(output_decomp_CH4_E[:,i])
i = i + 1
print(subs_matrix_CH4_E[:,:4])
print(len(subs_matrix_CH4_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_E = subs_matrix_CH4_E.clip(max=0)
print(subs_matrix_CH4_E[:,:4])
#make the results as absolute values
subs_matrix_CH4_E = abs(subs_matrix_CH4_E)
print(subs_matrix_CH4_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_E = np.zeros((len(t)-200,len(dfE_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E)
subs_matrix_CH4_E = np.vstack((zero_matrix_CH4_E, subs_matrix_CH4_E))
print(subs_matrix_CH4_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_E = (tf,1)
decomp_tot_CH4_E = np.zeros(matrix_tot_CH4_E)
i = 0
while i < tf:
decomp_tot_CH4_E[:,0] = decomp_tot_CH4_E[:,0] + subs_matrix_CH4_E[:,i]
i = i + 1
print(decomp_tot_CH4_E[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1,label='S1')
#plt.plot(t,decomp_tot_CH4_S2,label='S2')
plt.plot(t,decomp_tot_CH4_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
type(decomp_tot_CH4_S1[:,0])
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S1
df1_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1(t,Landfill_decomp_CO2_S1):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_S1
#set zero matrix
output_decomp_CO2_S1 = np.zeros((len(t),len(df1_CO2['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1 in enumerate(df1_CO2['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1[i:,i] = decomp_CO2_S1(t[:len(t)-i],remain_part_CO2_S1)
print(output_decomp_CO2_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1 = np.zeros((len(t)-1,len(df1_CO2['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S1[:,i] = np.diff(output_decomp_CO2_S1[:,i])
i = i + 1
print(subs_matrix_CO2_S1[:,:4])
print(len(subs_matrix_CO2_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1 = subs_matrix_CO2_S1.clip(max=0)
print(subs_matrix_CO2_S1[:,:4])
#make the results as absolute values
subs_matrix_CO2_S1 = abs(subs_matrix_CO2_S1)
print(subs_matrix_CO2_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1 = np.zeros((len(t)-200,len(df1_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1)
subs_matrix_CO2_S1 = np.vstack((zero_matrix_CO2_S1, subs_matrix_CO2_S1))
print(subs_matrix_CO2_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1 = (tf,1)
decomp_tot_CO2_S1 = np.zeros(matrix_tot_CO2_S1)
i = 0
while i < tf:
decomp_tot_CO2_S1[:,0] = decomp_tot_CO2_S1[:,0] + subs_matrix_CO2_S1[:,i]
i = i + 1
print(decomp_tot_CO2_S1[:,0])
#E
dfE_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_CO2_E(t,Landfill_decomp_CO2_E):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_E
#set zero matrix
output_decomp_CO2_E = np.zeros((len(t),len(dfE_CO2['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_E in enumerate(dfE_CO2['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_E[i:,i] = decomp_CO2_E(t[:len(t)-i],remain_part_CO2_E)
print(output_decomp_CO2_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_E = np.zeros((len(t)-1,len(dfE_CO2['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_E[:,i] = np.diff(output_decomp_CO2_E[:,i])
i = i + 1
print(subs_matrix_CO2_E[:,:4])
print(len(subs_matrix_CO2_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_E = subs_matrix_CO2_E.clip(max=0)
print(subs_matrix_CO2_E[:,:4])
#make the results as absolute values
subs_matrix_CO2_E = abs(subs_matrix_CO2_E)
print(subs_matrix_CO2_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_E = np.zeros((len(t)-200,len(dfE_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_E)
subs_matrix_CO2_E = np.vstack((zero_matrix_CO2_E, subs_matrix_CO2_E))
print(subs_matrix_CO2_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_E = (tf,1)
decomp_tot_CO2_E = np.zeros(matrix_tot_CO2_E)
i = 0
while i < tf:
decomp_tot_CO2_E[:,0] = decomp_tot_CO2_E[:,0] + subs_matrix_CO2_E[:,i]
i = i + 1
print(decomp_tot_CO2_E[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1,label='S1')
#plt.plot(t,decomp_tot_CO2_S2,label='S2')
plt.plot(t,decomp_tot_CO2_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
type(decomp_tot_CO2_S1[:,0])
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#per scenario: firewood/energy C loss + AGB decomposition + HWP outflow + PH_Emissions_HWP + landfill CO2
Emissions_S1 = [c_firewood_energy_S1, decomp_tot_S1[:,0], TestDSM1.o, PH_Emissions_HWP1, decomp_tot_CO2_S1[:,0]]
Emissions_E = [c_firewood_energy_E, c_pellets_E, decomp_tot_E[:,0], TestDSME.o, PH_Emissions_HWPE, decomp_tot_CO2_E[:,0]]
Emissions_S1_C = [c_firewood_energy_S1, decomp_tot_S1_C[:,0], TestDSM1.o, PH_Emissions_HWP1, decomp_tot_CO2_S1[:,0]]
Emissions_E_C = [c_firewood_energy_E, c_pellets_E, decomp_tot_E_C[:,0], TestDSME.o, PH_Emissions_HWPE, decomp_tot_CO2_E[:,0]]
Emissions_RIL_S1 = [sum(x) for x in zip(*Emissions_S1)]
Emissions_RIL_E = [sum(x) for x in zip(*Emissions_E)]
Emissions_RIL_S1_C = [sum(x) for x in zip(*Emissions_S1_C)]
Emissions_RIL_E_C = [sum(x) for x in zip(*Emissions_E_C)]
#CH4_S1
Emissions_CH4_RIL_S1 = decomp_tot_CH4_S1[:,0]
#CH4_E
Emissions_CH4_RIL_E = decomp_tot_CH4_E[:,0]
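#Quick length check (illustration): zip() truncates to its shortest input, so all summed
#profiles should span the full 201-year time frame if the spreadsheet columns do.
print(len(Emissions_RIL_S1), len(Emissions_RIL_E), len(Emissions_CH4_RIL_S1), len(Emissions_CH4_RIL_E))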
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1 = Emissions_RIL_S1
#Col2_S2 = Emissions_RIL_S2
Col2_E = Emissions_RIL_E
Col2_S1_C = Emissions_RIL_S1_C
Col2_E_C = Emissions_RIL_E_C
Col3_1 = Emissions_CH4_RIL_S1
#Col3_2 = Emissions_CH4_RIL_S2
Col3_E = Emissions_CH4_RIL_E
Col4 = Emission_ref
Col5_1 = [x + y for x, y in zip(flat_list_RIL, RIL_seq_S1)]
Col5_E = [x + y for x, y in zip(flat_list_RIL, RIL_seq_E)]
Col5_C_1 = [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_S1)]
Col5_C_E = [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_E)]
df1 = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1,'kg_CH4':Col3_1,'kg_CO2_seq':Col5_1,'emission_ref':Col4})
#df2 = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2,'kg_CH4':Col3_2,'kg_CO2_seq':Col5,'emission_ref':Col4})
dfE = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E,'kg_CH4':Col3_E,'kg_CO2_seq':Col5_E,'emission_ref':Col4})
df1_C = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_C,'kg_CH4':Col3_1,'kg_CO2_seq':Col5_C_1,'emission_ref':Col4})
dfE_C = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E_C,'kg_CH4':Col3_E,'kg_CO2_seq':Col5_C_E,'emission_ref':Col4})
writer = pd.ExcelWriter('emissions_seq_RIL_EC.xlsx', engine = 'xlsxwriter')
df1.to_excel(writer, sheet_name = 'RIL_S1', header=True, index=False )
#df2.to_excel(writer, sheet_name = 'RIL_S2', header=True, index=False)
dfE.to_excel(writer, sheet_name = 'RIL_E', header=True, index=False)
df1_C.to_excel(writer, sheet_name = 'RIL_C_S1', header=True, index=False )
dfE_C.to_excel(writer, sheet_name = 'RIL_C_E', header=True, index=False)
writer.save()
writer.close()
#%%
## DYNAMIC LCA (wood-based scenarios)
# Step (10): Set General Parameters for Dynamic LCA calculation
# General Parameters
aCH4 = 0.129957e-12; # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12; # methane - lifetime (years)
aCO2 = 0.0018088e-12; # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]; # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186]; # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217; # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #extended to 202 so that DCF(t-i) is available up to DCF(201), i.e. the impact of an emission occurring in year 200; there is no DCF(0)
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(0,tf,1)
## CO2 calculation formula
# time dependant atmospheric load for CO2, Bern model
def C_CO2(t):
return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])
output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)
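#quick check (illustration): at t = 0 the Bern impulse response equals
#a0Bern + sum(aBern) = 0.217 + 0.259 + 0.338 + 0.186 = 1.0
print('C_CO2(0) =', C_CO2(0))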
## CH4 calculation formula
# time dependant atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
return np.exp(-t/TauCH4)
output_CH4 = np.array([C_CH4(ti) for ti in t])
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction remaining in the atmosphere')
plt.show()
output_CH4.size
#%%
#determine the C(t) for CO2
s = []
t = np.arange(0,tf,1)
for i in t:
s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
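#Illustration: cumulating the instantaneous DCFs over 100 years gives the absolute
#(cumulative) radiative forcing per kg emitted in year 0, the quantity underlying dynamic GWP.
print('cumulative DCF CO2, 100 yr:', DCF_inst_CO2[1:101].sum(), 'W.yr/m2 per kg CO2')
print('cumulative DCF CH4, 100 yr:', DCF_inst_CH4[1:101].sum(), 'W.yr/m2 per kg CH4')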
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
##wood-based
#read S1
df = pd.read_excel('emissions_seq_RIL_EC.xlsx', 'RIL_S1') # can also index sheet by name or fetch all sheets
emission_CO2_S1 = df['kg_CO2'].tolist()
emission_CH4_S1 = df['kg_CH4'].tolist()
emission_CO2_seq_S1 = df['kg_CO2_seq'].tolist()
#read S2_C
df = pd.read_excel('emissions_seq_RIL_EC.xlsx', 'RIL_C_S1') # can also index sheet by name or fetch all sheets
emission_CO2_S1_C = df['kg_CO2'].tolist()
emission_CH4_S1_C = df['kg_CH4'].tolist()
emission_CO2_seq_S1_C = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read E
df = pd.read_excel('emissions_seq_RIL_EC.xlsx', 'RIL_E') # can also index sheet by name or fetch all sheets
emission_CO2_E = df['kg_CO2'].tolist()
emission_CH4_E = df['kg_CH4'].tolist()
emission_CO2_seq_E = df['kg_CO2_seq'].tolist()
#read E_EC
df = pd.read_excel('emissions_seq_RIL_EC.xlsx', 'RIL_C_E') # can also index sheet by name or fetch all sheets
emission_CO2_E_C = df['kg_CO2'].tolist()
emission_CH4_E_C = df['kg_CH4'].tolist()
emission_CO2_seq_E_C = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
#read S1
df = pd.read_excel('RIL_EC.xlsx', 'NonRW_RIL_S1') # can also index sheet by name or fetch all sheets
emission_NonRW_RIL_S1 = df['NonRW_emissions'].tolist()
emission_NonRW_RIL_S1_seq = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read E
df = pd.read_excel('RIL_EC.xlsx', 'NonRW_RIL_E') # can also index sheet by name or fetch all sheets
emission_NonRW_RIL_E = df['NonRW_emissions'].tolist()
emission_NonRW_RIL_E_seq = df['kg_CO2_seq'].tolist()
#%%
#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4
#DCF(t-i) CO2
matrix = (tf-1,tf-1)
DCF_CO2_ti = np.zeros(matrix)
for t in range(0,tf-1):
i = -1
while i < t:
DCF_CO2_ti[i+1,t] = DCF_inst_CO2[t-i]
i = i + 1
print(DCF_CO2_ti)
#sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape
#DCF(t-i) CH4
matrix = (tf-1,tf-1)
DCF_CH4_ti = np.zeros(matrix)
for t in range(0,tf-1):
i = -1
while i < t:
DCF_CH4_ti[i+1,t] = DCF_inst_CH4[t-i]
i = i + 1
print(DCF_CH4_ti)
#sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape
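# Minimal check of the lag structure (illustrative only): with the indexing used above,
# an emission in year r (row r) contributes DCF_inst_CO2[t - r + 1] to column t, e.g.:
assert DCF_CO2_ti[5, 5] == DCF_inst_CO2[1]
assert DCF_CO2_ti[0, 5] == DCF_inst_CO2[6]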
#%%
# Step (16): Calculate instantaneous global warming impact (GWI)
#Wood-based
#S1
t = np.arange(0,tf-1,1)
matrix_GWI_S1 = (tf-1,3)
GWI_inst_S1 = np.zeros(matrix_GWI_S1)
for t in range(0,tf-1):
GWI_inst_S1[t,0] = np.sum(np.multiply(emission_CO2_S1,DCF_CO2_ti[:,t]))
GWI_inst_S1[t,1] = np.sum(np.multiply(emission_CH4_S1,DCF_CH4_ti[:,t]))
GWI_inst_S1[t,2] = np.sum(np.multiply(emission_CO2_seq_S1,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1 = (tf-1,1)
GWI_inst_tot_S1 = np.zeros(matrix_GWI_tot_S1)
GWI_inst_tot_S1[:,0] = np.array(GWI_inst_S1[:,0] + GWI_inst_S1[:,1] + GWI_inst_S1[:,2])
print(GWI_inst_tot_S1[:,0])
t = np.arange(0,tf-1,1)
#S1_C
t = np.arange(0,tf-1,1)
matrix_GWI_S1_C = (tf-1,3)
GWI_inst_S1_C = np.zeros(matrix_GWI_S1_C)
for t in range(0,tf-1):
GWI_inst_S1_C[t,0] = np.sum(np.multiply(emission_CO2_S1_C,DCF_CO2_ti[:,t]))
GWI_inst_S1_C[t,1] = np.sum(np.multiply(emission_CH4_S1_C,DCF_CH4_ti[:,t]))
GWI_inst_S1_C[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_C,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_C = (tf-1,1)
GWI_inst_tot_S1_C = np.zeros(matrix_GWI_tot_S1_C)
GWI_inst_tot_S1_C[:,0] = np.array(GWI_inst_S1_C[:,0] + GWI_inst_S1_C[:,1] + GWI_inst_S1_C[:,2])
print(GWI_inst_tot_S1_C[:,0])
t = np.arange(0,tf-1,1)
#E
t = np.arange(0,tf-1,1)
matrix_GWI_E = (tf-1,3)
GWI_inst_E = np.zeros(matrix_GWI_E)
for t in range(0,tf-1):
GWI_inst_E[t,0] = np.sum(np.multiply(emission_CO2_E,DCF_CO2_ti[:,t]))
GWI_inst_E[t,1] = np.sum(np.multiply(emission_CH4_E,DCF_CH4_ti[:,t]))
GWI_inst_E[t,2] = np.sum(np.multiply(emission_CO2_seq_E,DCF_CO2_ti[:,t]))
matrix_GWI_tot_E = (tf-1,1)
GWI_inst_tot_E = np.zeros(matrix_GWI_tot_E)
GWI_inst_tot_E[:,0] = np.array(GWI_inst_E[:,0] + GWI_inst_E[:,1] + GWI_inst_E[:,2])
print(GWI_inst_tot_E[:,0])
#E_C
t = np.arange(0,tf-1,1)
matrix_GWI_E_C = (tf-1,3)
GWI_inst_E_C = np.zeros(matrix_GWI_E_C)
for t in range(0,tf-1):
GWI_inst_E_C[t,0] = np.sum(np.multiply(emission_CO2_E_C,DCF_CO2_ti[:,t]))
GWI_inst_E_C[t,1] = np.sum(np.multiply(emission_CH4_E_C,DCF_CH4_ti[:,t]))
GWI_inst_E_C[t,2] = np.sum(np.multiply(emission_CO2_seq_E_C,DCF_CO2_ti[:,t]))
matrix_GWI_tot_E_C = (tf-1,1)
GWI_inst_tot_E_C = np.zeros(matrix_GWI_tot_E_C)
GWI_inst_tot_E_C[:,0] = np.array(GWI_inst_E_C[:,0] + GWI_inst_E_C[:,1] + GWI_inst_E_C[:,2])
print(GWI_inst_tot_E_C[:,0])
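# Equivalent vectorised formulation (sketch, not used downstream): each GWI_inst series is a
# discrete convolution of the emission vectors with the DCF matrices, so a single matrix
# product can be used to cross-check the loops above, e.g. for the RIL_E_EC scenario:
GWI_check_E = (np.asarray(emission_CO2_E) @ DCF_CO2_ti
+ np.asarray(emission_CH4_E) @ DCF_CH4_ti
+ np.asarray(emission_CO2_seq_E) @ DCF_CO2_ti)
print(np.allclose(GWI_check_E, GWI_inst_tot_E[:,0]))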
#GWI_inst for all gases
##NonRW
#S1
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_RIL_S1 = (tf-1,2)
GWI_inst_NonRW_RIL_S1 = np.zeros(matrix_GWI_NonRW_RIL_S1)
for t in range(0,tf-1):
GWI_inst_NonRW_RIL_S1[t,0] = np.sum(np.multiply(emission_NonRW_RIL_S1,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_RIL_S1[t,1] = np.sum(np.multiply(emission_NonRW_RIL_S1_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_RIL_S1 = (tf-1,1)
GWI_inst_tot_NonRW_RIL_S1 = np.zeros(matrix_GWI_tot_NonRW_RIL_S1)
GWI_inst_tot_NonRW_RIL_S1[:,0] = np.array(GWI_inst_NonRW_RIL_S1[:,0] + GWI_inst_NonRW_RIL_S1[:,1])
print(GWI_inst_tot_NonRW_RIL_S1[:,0])
t = np.arange(0,tf-1,1)
#E
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_RIL_E = (tf-1,2)
GWI_inst_NonRW_RIL_E = np.zeros(matrix_GWI_NonRW_RIL_E)
for t in range(0,tf-1):
GWI_inst_NonRW_RIL_E[t,0] = np.sum(np.multiply(emission_NonRW_RIL_E,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_RIL_E[t,1] = np.sum(np.multiply(emission_NonRW_RIL_E_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_RIL_E = (tf-1,1)
GWI_inst_tot_NonRW_RIL_E = np.zeros(matrix_GWI_tot_NonRW_RIL_E)
GWI_inst_tot_NonRW_RIL_E[:,0] = np.array(GWI_inst_NonRW_RIL_E[:,0] + GWI_inst_NonRW_RIL_E[:,1])
print(GWI_inst_tot_NonRW_RIL_E[:,0])
#plotting
t = np.arange(0,tf-1,1)
#create zero list to highlight the horizontal line for 0
def zerolistmaker(n):
listofzeros = [0] * (n)
return listofzeros
#convert to flat list
GWI_inst_tot_NonRW_RIL_S1 = np.array([item for sublist in GWI_inst_tot_NonRW_RIL_S1 for item in sublist])
GWI_inst_tot_NonRW_RIL_E = np.array([item for sublist in GWI_inst_tot_NonRW_RIL_E for item in sublist])
GWI_inst_tot_S1 = np.array([item for sublist in GWI_inst_tot_S1 for item in sublist])
GWI_inst_tot_E = np.array([item for sublist in GWI_inst_tot_E for item in sublist])
GWI_inst_tot_S1_C = np.array([item for sublist in GWI_inst_tot_S1_C for item in sublist])
GWI_inst_tot_E_C = np.array([item for sublist in GWI_inst_tot_E_C for item in sublist])
plt.plot(t, GWI_inst_tot_NonRW_RIL_S1, color='forestgreen', label='NR_RIL_M_EC', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_RIL_E, color='lightcoral', label='NR_RIL_E_EC', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_S1, color='forestgreen', label='RIL_M_EC')
plt.plot(t, GWI_inst_tot_E, color='lightcoral', label='RIL_E_EC')
plt.plot(t, GWI_inst_tot_S1_C, color='turquoise', label='RIL_C_M_EC')
plt.plot(t, GWI_inst_tot_E_C, color='cornflowerblue', label='RIL_C_E_EC')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWI_inst_tot_NonRW_RIL_E, GWI_inst_tot_NonRW_RIL_S1, color='lightcoral', alpha=0.3)
#plt.fill_between(t, GWI_inst_tot_NonRW_RIL_S2, GWI_inst_tot_NonRW_RIL_E, color='lightcoral', alpha=0.3)
plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.xlim(0,200)
#plt.ylim(-5e-10,1e-10)
plt.title('Instantaneous GWI, RIL_EC')
plt.xlabel('Time (year)')
#plt.ylabel('GWI_inst (10$^{-13}$ W/m$^2$)')
plt.ylabel('GWI_inst (W/m$^2$)')
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_RIL_EC', dpi=300)
plt.show()
len(GWI_inst_tot_S1)
#%%
#Step (17): Calculate cumulative global warming impact (GWI)
#wood-based
GWI_cum_S1 = np.cumsum(GWI_inst_tot_S1)
GWI_cum_E = np.cumsum(GWI_inst_tot_E)
GWI_cum_S1_C = np.cumsum(GWI_inst_tot_S1_C)
GWI_cum_E_C = np.cumsum(GWI_inst_tot_E_C)
#NonRW
GWI_cum_NonRW_RIL_S1 = np.cumsum(GWI_inst_tot_NonRW_RIL_S1)
GWI_cum_NonRW_RIL_E = np.cumsum(GWI_inst_tot_NonRW_RIL_E)
#print(GWI_cum_S1)
t = np.arange(0,tf-1,1)
plt.xlabel('Time (year)')
#plt.ylabel('GWI_cum (10$^{-11}$ W/m$^2$)')
plt.ylabel('GWI_cum (W/m$^2$)')
plt.xlim(0,200)
#plt.ylim(-6e-8,0.5e-8)
plt.title('Cumulative GWI, RIL_EC')
plt.plot(t, GWI_cum_NonRW_RIL_S1, color='forestgreen', label='NR_RIL_M_EC', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_RIL_E, color='lightcoral', label='NR_RIL_E_EC', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_S1, color='forestgreen', label='RIL_M_EC')
plt.plot(t, GWI_cum_E, color='lightcoral', label='RIL_E_EC')
plt.plot(t, GWI_cum_S1_C, color='turquoise', label='RIL_C_M_EC')
plt.plot(t, GWI_cum_E_C, color='cornflowerblue', label='RIL_C_E_EC')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
plt.grid(True)
#plt.fill_between(t, GWI_cum_NonRW_RIL_E, GWI_cum_NonRW_RIL_S1, color='lightcoral', alpha=0.3)
#plt.fill_between(t, GWI_cum_NonRW_RIL_S2, GWI_cum_NonRW_RIL_E, color='lightcoral', alpha=0.3)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_Non_RW_RIL_EC', dpi=300)
plt.show()
len(GWI_cum_S1)
#%%
#Step (18): Determine the Instantenous and Cumulative GWI for the emission reference (1 kg CO2 emission at time zero) before performing dynamic GWP calculation
t = np.arange(0,tf-1,1)
matrix_GWI_ref = (tf-1,1)
GWI_inst_ref = np.zeros(matrix_GWI_ref)
for t in range(0,tf-1):
GWI_inst_ref[t,0] = np.sum(np.multiply(emission_CO2_ref,DCF_CO2_ti[:,t]))
#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)
#determine the GWI cumulative for the emission reference
t = np.arange(0,tf-1,1)
GWI_cum_ref = np.cumsum(GWI_inst_ref[:,0])
#print(GWI_cum_ref)
plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (10$^{-13}$ W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)
len(GWI_cum_ref)
#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
#Wood-based
GWP_dyn_cum_S1 = [x/(y*1000) for x,y in zip(GWI_cum_S1, GWI_cum_ref)]
GWP_dyn_cum_E = [x/(y*1000) for x,y in zip(GWI_cum_E, GWI_cum_ref)]
GWP_dyn_cum_S1_C = [x/(y*1000) for x,y in zip(GWI_cum_S1_C, GWI_cum_ref)]
GWP_dyn_cum_E_C = [x/(y*1000) for x,y in zip(GWI_cum_E_C, GWI_cum_ref)]
#NonRW
GWP_dyn_cum_NonRW_RIL_S1 = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_RIL_S1, GWI_cum_ref)]
GWP_dyn_cum_NonRW_RIL_E = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_RIL_E, GWI_cum_ref)]
t = np.arange(0,tf-1,1)
fig=plt.figure()
fig.show()
ax=fig.add_subplot(111)
ax.plot(t, GWP_dyn_cum_NonRW_RIL_S1, color='forestgreen',label='NR_RIL_M_EC', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_RIL_E, color='lightcoral', label='NR_RIL_E_EC', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_S1, color='forestgreen',label='RIL_M_EC')
ax.plot(t, GWP_dyn_cum_E, color='lightcoral', label='RIL_E_EC')
ax.plot(t, GWP_dyn_cum_S1_C, color='turquoise',label='RIL_C_M_EC')
ax.plot(t, GWP_dyn_cum_E_C, color='cornflowerblue', label='RIL_C_E_EC')
ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWP_dyn_cum_NonRW_RIL_E, GWP_dyn_cum_NonRW_RIL_S1, color='lightcoral', alpha=0.3)
#plt.fill_between(t, GWP_dyn_cum_NonRW_RIL_S2, GWP_dyn_cum_NonRW_RIL_E, color='lightcoral', alpha=0.3)
plt.grid(True)
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax.set_xlim(0,200)
#ax.set_ylim(-400,50)
ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_title('Dynamic GWP, RIL_EC')
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_RIL_EC', dpi=300)
plt.draw()
len(GWP_dyn_cum_S1)
#%%
#Step (20): Exporting the data behind result graphs to Excel
year = []
for x in range (0, 201):
year.append(x)
### Create Column
Col1 = year
##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_1 = GWI_inst_tot_S1
Col_GI_3 = GWI_inst_tot_E
Col_GI_1_C = GWI_inst_tot_S1_C
Col_GI_3_C = GWI_inst_tot_E_C
#print(Col_GI_1)
#print(np.shape(Col_GI_1))
#GWI_inst from counter use scenarios
Col_GI_4 = GWI_inst_tot_NonRW_RIL_S1
Col_GI_6 = GWI_inst_tot_NonRW_RIL_E
#print(Col_GI_7)
#print(np.shape(Col_GI_7))
#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_1 = GWI_cum_S1
Col_GC_3 = GWI_cum_E
Col_GC_1_C = GWI_cum_S1_C
Col_GC_3_C = GWI_cum_E_C
#GWI_cumulative from counter use scenarios
Col_GC_4 = GWI_cum_NonRW_RIL_S1
Col_GC_6 = GWI_cum_NonRW_RIL_E
#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_1 = GWP_dyn_cum_S1
Col_GWP_3 = GWP_dyn_cum_E
Col_GWP_1_C = GWP_dyn_cum_S1_C
Col_GWP_3_C = GWP_dyn_cum_E_C
#GWPdyn from counter use scenarios
Col_GWP_4 = GWP_dyn_cum_NonRW_RIL_S1
Col_GWP_6 = GWP_dyn_cum_NonRW_RIL_E
#Create colum results
dfM_EC_GI = pd.DataFrame.from_dict({'Year':Col1,'RIL_M_EC (W/m2)':Col_GI_1, 'RIL_C_M_EC (W/m2)':Col_GI_1_C,
'RIL_E_EC (W/m2)':Col_GI_3, 'RIL_C_E_EC (W/m2)':Col_GI_3_C,
'NR_RIL_M_EC (W/m2)':Col_GI_4, 'NR_RIL_E_EC (W/m2)':Col_GI_6})
dfM_EC_GC = pd.DataFrame.from_dict({'Year':Col1,'RIL_M_EC (W/m2)':Col_GC_1, 'RIL_C_M_EC (W/m2)':Col_GC_1_C,
'RIL_E_EC (W/m2)':Col_GC_3, 'RIL_C_E_EC (W/m2)':Col_GC_3_C,
'NR_RIL_M_EC (W/m2)':Col_GC_4, 'NR_RIL_E_EC (W/m2)':Col_GC_6})
dfM_EC_GWPdyn = pd.DataFrame.from_dict({'Year':Col1,'RIL_M_EC (t-CO2-eq)':Col_GWP_1, 'RIL_C_M_EC (t-CO2-eq)':Col_GWP_1_C,
'RIL_E_EC (t-CO2-eq)':Col_GWP_3, 'RIL_C_E_EC (t-CO2-eq)':Col_GWP_3_C,
'NR_RIL_M_EC (t-CO2-eq)':Col_GWP_4, 'NR_RIL_E_EC (t-CO2-eq)':Col_GWP_6})
#Export to excel
writer = pd.ExcelWriter('GraphResults_RIL_EC.xlsx', engine = 'xlsxwriter')
dfM_EC_GI.to_excel(writer, sheet_name = 'GWI_Inst_RIL_EC', header=True, index=False)
dfM_EC_GC.to_excel(writer, sheet_name = 'Cumulative GWI_RIL_EC', header=True, index=False)
dfM_EC_GWPdyn.to_excel(writer, sheet_name = 'GWPdyn_RIL_EC', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (21): Generate the excel file for the individual carbon emission and sequestration flows
#print year column
year = []
for x in range (0, 201):
year.append(x)
print (year)
division = 1000*44/12
division_CH4 = 1000*16/12
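# Unit-conversion note (illustrative): dividing kg CO2 by 1000*44/12 converts it to tonnes of
# carbon (44/12 is the molar-mass ratio of CO2 to C), and 1000*16/12 does the same for CH4.
print(3666.7 / division)   # ~1.0 t C per 3666.7 kg CO2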
flat_list_RIL = [x/division for x in flat_list_RIL]
flat_list_RIL_C = [x/division for x in flat_list_RIL_C]
#RIL_M_existing
c_firewood_energy_S1 = [x/division for x in c_firewood_energy_S1]
decomp_tot_S1[:,0] = [x/division for x in decomp_tot_S1[:,0]]
RIL_seq_S1 = [x/division for x in RIL_seq_S1]
TestDSM1.o = [x/division for x in TestDSM1.o]
PH_Emissions_HWP1 = [x/division for x in PH_Emissions_HWP1]
#OC_storage_RIL_S1 = [x/division for x in OC_storage_RIL_S1]
decomp_tot_CO2_S1[:,0] = [x/division for x in decomp_tot_CO2_S1[:,0]]
decomp_tot_CH4_S1[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1[:,0]]
#RIL_C_M_existing
decomp_tot_S1_C[:,0] = [x/division for x in decomp_tot_S1_C[:,0]]
RIL_seq_C_S1 = [x/division for x in RIL_seq_C_S1]
#RIL_E
c_firewood_energy_E = [x/division for x in c_firewood_energy_E]
RIL_seq_E = [x/division for x in RIL_seq_E]
c_pellets_E = [x/division for x in c_pellets_E]
decomp_tot_E[:,0] = [x/division for x in decomp_tot_E[:,0]]
TestDSME.o = [x/division for x in TestDSME.o]
PH_Emissions_HWPE = [x/division for x in PH_Emissions_HWPE]
#OC_storage_RIL_E = [x/division for x in OC_storage_RIL_E]
decomp_tot_CO2_E[:,0] = [x/division for x in decomp_tot_CO2_E[:,0]]
decomp_tot_CH4_E[:,0] = [x/division_CH4 for x in decomp_tot_CH4_E[:,0]]
#RIL_C_E
decomp_tot_E_C[:,0] = [x/division for x in decomp_tot_E_C[:,0]]
RIL_seq_C_E = [x/division for x in RIL_seq_C_E]
#landfill aggregate flows
Landfill_decomp_S1 = decomp_tot_CH4_S1, decomp_tot_CO2_S1
Landfill_decomp_E = decomp_tot_CH4_E, decomp_tot_CO2_E
Landfill_decomp_S1 = [sum(x) for x in zip(*Landfill_decomp_S1)]
Landfill_decomp_E = [sum(x) for x in zip(*Landfill_decomp_E)]
Landfill_decomp_S1 = [item for sublist in Landfill_decomp_S1 for item in sublist]
Landfill_decomp_E = [item for sublist in Landfill_decomp_E for item in sublist]
Column1 = year
Column7 = flat_list_RIL
#RIL_E_EC
Column8 = c_firewood_energy_E
Column8_1 = c_pellets_E
Column9 = decomp_tot_E[:,0]
Column9_C = decomp_tot_E_C[:,0]
Column10 = TestDSME.o
Column11 = PH_Emissions_HWPE
#Column12_1 = OC_storage_RIL_E
Column12 = Landfill_decomp_E
#RIL_C_M_EC
Column13 = c_firewood_energy_S1
Column14 = decomp_tot_S1[:,0]
Column14_C = decomp_tot_S1_C[:,0]
Column15 = TestDSM1.o
Column16 = PH_Emissions_HWP1
#Column17_1 = OC_storage_RIL_S1
Column17 = Landfill_decomp_S1
dfM_exst = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':[x + y for x, y in zip(flat_list_RIL, RIL_seq_S1)],
# '9: Landfill storage (t-C)': Column17_1,
'F1-0: Residue decomposition (t-C)':Column14,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column13,
'F8-0: Operational stage/processing emissions (t-C)':Column16,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column15,
'F7-0: Landfill gas decomposition (t-C)':Column17})
dfM_exst_C = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':[x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_S1)],
# '9: Landfill storage (t-C)': Column17_1,
'F1-0: Residue decomposition (t-C)':Column14_C,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column13,
'F8-0: Operational stage/processing emissions (t-C)':Column16,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column15,
'F7-0: Landfill gas decomposition (t-C)':Column17})
dfE = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':[x + y for x, y in zip(flat_list_RIL, RIL_seq_E)],
#'9: Landfill storage (t-C)': Column12_1,
'F1-0: Residue decomposition (t-C)':Column9,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column8,
'F8-0: Operational stage/processing emissions (t-C)':Column11,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column10,
'F7-0: Landfill gas decomposition (t-C)':Column12,
'F4-0: Emissions from wood pellets use (t-C)': Column8_1})
dfE_C = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':[x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_E)],
#'9: Landfill storage (t-C)': Column12_1,
'F1-0: Residue decomposition (t-C)':Column9_C,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column8,
'F8-0: Operational stage/processing emissions (t-C)':Column11,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column10,
'F7-0: Landfill gas decomposition (t-C)':Column12,
'F4-0: Emissions from wood pellets use (t-C)': Column8_1})
writer = pd.ExcelWriter('C_flows_RIL_EC.xlsx', engine = 'xlsxwriter')
dfM_exst.to_excel(writer, sheet_name = 'RIL_M_EC', header=True, index=False)
dfE.to_excel(writer, sheet_name = 'RIL_E_EC', header=True, index=False)
dfM_exst_C.to_excel(writer, sheet_name = 'RIL_C_M_EC', header=True, index=False)
dfE_C.to_excel(writer, sheet_name = 'RIL_C_E_EC', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs
#RIL_M_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1_s=fig.add_subplot(111)
ax1_s.plot(t, [x + y for x, y in zip(flat_list_RIL, RIL_seq_S1)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1_s.plot(t, OC_storage_RIL_S1, color='darkturquoise', label='9: Landfill storage')
ax1_s.plot(t, decomp_tot_S1[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1_s.plot(t, c_firewood_energy_S1, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1_s.plot(t, PH_Emissions_HWP1, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax1_s.plot(t, TestDSM1.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1_s.plot(t, Landfill_decomp_S1, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax1_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1_s.set_xlim(-1,200)
ax1_s.set_yscale('symlog')
ax1_s.set_xlabel('Time (year)')
ax1_s.set_ylabel('C flows(t-C) (symlog)')
ax1_s.set_title('Carbon flow, RIL_M_EC (symlog-scale)')
plt.draw()
#%%
#RIL_M_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1=fig.add_subplot(111)
ax1.plot(t, [x + y for x, y in zip(flat_list_RIL, RIL_seq_S1)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_RIL_S1, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_tot_S1[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S1, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, PH_Emissions_HWP1, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM1.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_S1, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1.set_xlim(-1,200)
ax1.set_ylim(-3,10)
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows(t-C)')
ax1.set_title('Carbon flow, RIL_M_EC')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#RIL_C_M_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1_C_s=fig.add_subplot(111)
ax1_C_s.plot(t, [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_S1)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1_s.plot(t, OC_storage_RIL_S1, color='darkturquoise', label='9: Landfill storage')
ax1_C_s.plot(t, decomp_tot_S1_C[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1_C_s.plot(t, c_firewood_energy_S1, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1_C_s.plot(t, PH_Emissions_HWP1, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax1_C_s.plot(t, TestDSM1.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1_C_s.plot(t, Landfill_decomp_S1, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax1_C_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1_C_s.set_xlim(-1,200)
ax1_C_s.set_yscale('symlog')
ax1_C_s.set_xlabel('Time (year)')
ax1_C_s.set_ylabel('C flows(t-C) (symlog)')
ax1_C_s.set_title('Carbon flow, RIL_C_M_EC (symlog-scale)')
plt.draw()
#%%
#RIL_C_M_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1_C=fig.add_subplot(111)
ax1_C.plot(t, [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_S1)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_RIL_S1, color='darkturquoise', label='9: Landfill storage')
ax1_C.plot(t, decomp_tot_S1_C[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1_C.plot(t, c_firewood_energy_S1, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1_C.plot(t, PH_Emissions_HWP1, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax1_C.plot(t, TestDSM1.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1_C.plot(t, Landfill_decomp_S1, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax1_C.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1_C.set_xlim(-1,200)
ax1_C.set_ylim(-3,10)
ax1_C.set_xlabel('Time (year)')
ax1_C.set_ylabel('C flows(t-C)')
ax1_C.set_title('Carbon flow, RIL_C_M_EC')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#RIL_E_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax2_s=fig.add_subplot(111)
ax2_s.plot(t, [x + y for x, y in zip(flat_list_RIL, RIL_seq_E)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2_s.plot(t, OC_storage_RIL_E, color='darkturquoise', label='9: Landfill storage')
ax2_s.plot(t, decomp_tot_E[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2_s.plot(t, c_firewood_energy_E, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2_s.plot(t, PH_Emissions_HWPE, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax2_s.plot(t, Landfill_decomp_E, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax2_s.plot(t, c_pellets_E, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax2_s.plot(t, TestDSME.o, label='in-use stock output')
ax2_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2_s.set_xlim(-1,200)
ax2_s.set_yscale('symlog')
ax2_s.set_xlabel('Time (year)')
ax2_s.set_ylabel('C flows(t-C) (symlog)')
ax2_s.set_title('Carbon flow, RIL_E_EC (symlog-scale)')
plt.draw()
#%%
#plot for the individual carbon flows
#RIL_E_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax2=fig.add_subplot(111)
ax2.plot(t, [x + y for x, y in zip(flat_list_RIL, RIL_seq_E)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_RIL_E, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_tot_E[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_E, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, PH_Emissions_HWPE, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, Landfill_decomp_E, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax2.plot(t, c_pellets_E, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax2.plot(t, TestDSME.o, label='in-use stock output')
ax2.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2.set_xlim(-1,200)
ax2.set_ylim(-3,10)
ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows(t-C)')
ax2.set_title('Carbon flow, RIL_E_EC')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_E')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#RIL_C_E_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax2_C_s=fig.add_subplot(111)
ax2_C_s.plot(t, [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_E)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2_s.plot(t, OC_storage_RIL_E, color='darkturquoise', label='9: Landfill storage')
ax2_C_s.plot(t, decomp_tot_E_C[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2_C_s.plot(t, c_firewood_energy_E, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2_C_s.plot(t, PH_Emissions_HWPE, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax2_C_s.plot(t, Landfill_decomp_E, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax2_C_s.plot(t, c_pellets_E, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax2_s.plot(t, TestDSME.o, label='in-use stock output')
ax2_C_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2_C_s.set_xlim(-1,200)
ax2_C_s.set_yscale('symlog')
ax2_C_s.set_xlabel('Time (year)')
ax2_C_s.set_ylabel('C flows(t-C) (symlog)')
ax2_C_s.set_title('Carbon flow, RIL_C_E_EC (symlog-scale)')
plt.draw()
#%%
#plot for the individual carbon flows
#RIL_C_E_EC (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax2_C=fig.add_subplot(111)
ax2_C.plot(t, [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_E)], color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_RIL_E, color='darkturquoise', label='9: Landfill storage')
ax2_C.plot(t, decomp_tot_E_C[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2_C.plot(t, c_firewood_energy_E, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2_C.plot(t, PH_Emissions_HWPE, color = 'orange', label='F8-0: Operational stage/processing emissions')
ax2_C.plot(t, Landfill_decomp_E, color='yellow', label= 'F7-0: Landfill gas decomposition')
ax2_C.plot(t, c_pellets_E, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax2.plot(t, TestDSME.o, label='in-use stock output')
ax2_C.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2_C.set_xlim(-1,200)
ax2_C.set_ylim(-3,10)
ax2_C.set_xlabel('Time (year)')
ax2_C.set_ylabel('C flows(t-C)')
ax2_C.set_title('Carbon flow, RIL_C_E_EC')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_E')
plt.draw()
#%%
#Step (23): Generate the excel file for the net carbon balance
Agg_Cflow_S1 = [c_firewood_energy_S1, RIL_seq_S1, decomp_tot_S1[:,0], TestDSM1.o, PH_Emissions_HWP1, Landfill_decomp_S1, flat_list_RIL]
Agg_Cflow_E = [c_firewood_energy_E, RIL_seq_E, c_pellets_E, decomp_tot_E[:,0], TestDSME.o, PH_Emissions_HWPE, Landfill_decomp_E, flat_list_RIL]
Agg_Cflow_S1_C = [c_firewood_energy_S1, RIL_seq_C_S1, decomp_tot_S1_C[:,0], TestDSM1.o, PH_Emissions_HWP1, Landfill_decomp_S1, flat_list_RIL_C]
Agg_Cflow_E_C = [c_firewood_energy_E, RIL_seq_C_E, c_pellets_E, decomp_tot_E_C[:,0], TestDSME.o, PH_Emissions_HWPE, Landfill_decomp_E, flat_list_RIL_C]
Agg_Cflow_RIL_S1 = [sum(x) for x in zip(*Agg_Cflow_S1)]
Agg_Cflow_RIL_E = [sum(x) for x in zip(*Agg_Cflow_E)]
Agg_Cflow_RIL_S1_C = [sum(x) for x in zip(*Agg_Cflow_S1_C)]
Agg_Cflow_RIL_E_C = [sum(x) for x in zip(*Agg_Cflow_E_C)]
#create column year
year = []
for x in range (0, 201):
year.append(x)
print (year)
#Create colum results
dfM_RIL_EC = pd.DataFrame.from_dict({'Year':year,'RIL_M_EC (t-C)':Agg_Cflow_RIL_S1, 'RIL_C_M_EC (t-C)':Agg_Cflow_RIL_S1_C,
'RIL_E_EC (t-C)':Agg_Cflow_RIL_E, 'RIL_C_E_EC (t-C)':Agg_Cflow_RIL_E_C})
#Export to excel
writer = pd.ExcelWriter('AggCFlow_RIL_EC.xlsx', engine = 'xlsxwriter')
dfM_RIL_EC.to_excel(writer, sheet_name = 'RIL_EC', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (24): Plot the net carbon balance
fig=plt.figure()
fig.show()
ax3=fig.add_subplot(111)
# plot
ax3.plot(t, Agg_Cflow_RIL_S1, color='forestgreen', label='RIL_M_EC')
ax3.plot(t, Agg_Cflow_RIL_E, color='lightcoral', label='RIL_E_EC')
ax3.plot(t, Agg_Cflow_RIL_S1_C, color='turquoise', label='RIL_C_M_EC')
ax3.plot(t, Agg_Cflow_RIL_E_C, color='cornflowerblue', label='RIL_C_E_EC')
ax3.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
ax3.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3.set_xlim(-1,200)
#ax3.set_yscale('symlog')
ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows (t-C)')
ax3.set_title('Net carbon balance, RIL_EC')
plt.draw()
#%%
#Step (25): Generate the excel file for documentation of individual carbon flows in the system definition (Fig. 1)
#print year column
year = []
for x in range (0, 201):
year.append(x)
print (year)
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
Column1 = year
division = 1000*44/12
division_CH4 = 1000*16/12
##RIL_S1
## define the input flow for the landfill (F5-7)
OC_storage_S1 = df1['Other_C_storage'].values
OC_storage_S1 = [x/division for x in OC_storage_S1]
OC_storage_S1 = [abs(number) for number in OC_storage_S1]
C_LF_S1 = [x*1/0.82 for x in OC_storage_S1]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1 = [x/division for x in df1['Input_PF'].values]
HWP_S1_energy = [x*1/3 for x in c_firewood_energy_S1]
HWP_S1_landfill = [x*1/0.82 for x in OC_storage_S1]
HWP_S1_sum = [HWP_S1, HWP_S1_energy, HWP_S1_landfill]
HWP_S1_sum = [sum(x) for x in zip(*HWP_S1_sum )]
#in-use stocks (S-4)
TestDSM1.s = [x/division for x in TestDSM1.s]
#TestDSM1.i = [x/division for x in TestDSM1.i]
# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S1 = (tf,1)
stocks_S1 = np.zeros(zero_matrix_stocks_S1)
i = 0
stocks_S1[0] = C_LF_S1[0] - Landfill_decomp_S1[0]
while i < tf-1:
stocks_S1[i+1] = np.array(C_LF_S1[i+1] - Landfill_decomp_S1[i+1] + stocks_S1[i])
i = i + 1
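# Equivalent closed form (illustrative cross-check, assuming both series cover the same
# 201-year horizon): the recursion above is a cumulative sum of landfill inflow minus
# landfill-gas decomposition.
stocks_S1_check = np.cumsum(np.asarray(C_LF_S1) - np.asarray(Landfill_decomp_S1))
print(np.allclose(stocks_S1[:,0], stocks_S1_check))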
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1 = [x1+x2 for (x1,x2) in zip(HWP_S1_sum, [x*2/3 for x in c_firewood_energy_S1])]
##RIL_M_EC: calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1 = (tf,1)
ForCstocks_S1 = np.zeros(zero_matrix_ForCstocks_S1)
i = 0
ForCstocks_S1[0] = initAGB - flat_list_RIL[0] - decomp_tot_S1[0] - HWP_logged_S1[0]
while i < tf-1:
ForCstocks_S1[i+1] = np.array(ForCstocks_S1[i] - flat_list_RIL[i+1] - decomp_tot_S1[i+1] - HWP_logged_S1[i+1])
i = i + 1
##RIL_C_M_EC: calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1_C = (tf,1)
ForCstocks_S1_C = np.zeros(zero_matrix_ForCstocks_S1_C)
i = 0
ForCstocks_S1_C[0] = initAGB - flat_list_RIL[0] - decomp_tot_S1_C[0] - HWP_logged_S1[0]
while i < tf-1:
ForCstocks_S1_C[i+1] = np.array(ForCstocks_S1_C[i] - flat_list_RIL[i+1] - decomp_tot_S1_C[i+1] - HWP_logged_S1[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
df1_amount = | pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'NonRW_RIL_S1') | pandas.read_excel |
import pandas as pd
data = | pd.read_csv("data/2016.csv") | pandas.read_csv |
# import the required libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
#get your portfolio tickers
# keep the stock and mutual fund tickers separate to compare how each group is performing
stck_tickers = ['AAPL', 'ENB', 'MDT', 'NKE', 'BRK-B']
myPortfolio = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import os
from datetime import datetime
from sklearn.linear_model import LinearRegression, SGDRegressor
import sys
import time
import imp
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import train_test_split
import lightgbm as lgb
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
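# Minimal usage sketch for the helpers above (toy numbers, not part of the pipeline):
# drop_duplicate(df, sub_set=[...]) removes exact repeats in place, and rmse() is the
# evaluation metric used further down, e.g.:
print('demo rmse:', rmse(np.array([2.0, 4.0]), np.array([1.0, 5.0])))   # -> 1.0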
class predict(object):
def __init__(self,trainfile,testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
# self.__dtree = DecisionTreeClassifier()
# self.__rforest = RandomForestClassifier()
# self.__svm = SVC(kernel='rbf')
self.lgb_params = {
'feature_fraction': 1,
'metric': 'rmse',
'min_data_in_leaf': 16,
'bagging_fraction': 0.85,
'learning_rate': 0.03,
'objective': 'mse',
'bagging_seed': 2 ** 7,
'num_leaves': 32,
'bagging_freq': 3,
'verbose': 0
}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)
self._xgb = XGBRegressor(max_depth=8,n_estimators=1000,min_child_weight=300,colsample_bytree=0.9,subsample=0.9,eta=0.15,seed=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
def trainingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)
df = df.dropna()
df = df.loc[df['item_cnt_day']>0]
subset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']
drop_duplicate(df, sub_set=subset_train)
# replace negative prices with the median price from a comparable shop/item/month slice
median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()
df.loc[df.item_price < 0, 'item_price'] = median
# clip extreme outliers in daily sales counts and prices
df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
df['item_price'] = df['item_price'].clip(0, 300000)
# merge shops that appear under duplicate ids
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]
df['item_id'] = np.log1p(df['item_id'])
self.train_labels1 = df['item_cnt_day']
self.train_data1 = df.drop(columns='item_cnt_day')
self.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)
self.x_train_val = self.train_data[-100:]
self.y_train_val = self.train_labels[-100:]
def testingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = | pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser) | pandas.read_csv |
# coding: utf-8
# # Notebook to generate a dataframe that captures data reliability
# Perform a series of tests/questions on each row and score the result as 0 (missing), 1 (ambiguous) or 2 (present); a minimal generic scoring sketch follows the lists below
# - is the plot number recorded? If not, this makes it very difficult to identify the plot as unique vs others (2 if different from 1.2)
# - is the type of property recorded? Very difficult to interpret the results if we don’t know this
# - does the plot have a zone? (Other means ambiguous)
# - does the plot have a zone section?
# - does the plot have toilets (sum should not include disused)
# - does the plot receive water?
# - who is the respondent (2 for landlord, 1 for caretaker and tenant, 0 for unknown)
# - was gps info captured?
# - does the number of users sum up to the initial value
# - do they know where they dispose of solid wastes?
# - do they know if the toilet has been upgraded- no not reliable if they haven’t been there 2 years
# - Do they know the age of the toilet?
# - Do they give the age of each toilet if there is more than 1
# - Do they know if the toilet has been emptied?
# - Do they know how much they spent?
# - Do they know how often they empty it?
# - Do they give a value for emptying it but have never actually emptied it
# - Is the toilet accessible but has never been emptied?
# - Is property recorded as not residential but a tenant answering questions
# - Toilet is not feasible for emptying but they have
#
# ## List of possible inconsistencies that people have mentioned (excluding geospatial which are being dealt with separately
# - visit information
# - length of time of responder on plot - if units is not a number
# - weird time of visit
# - plot types
# - no Record plot number
# - record plot numbers are not equal
# - zone and gps don't correspond
# - number of families on the plot
# - number of people on plot
# - people living on the plot vs toilet users
# - toilet types
# - no toilets
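# A minimal illustration of the 0/1/2 scoring idea described above, using a toy series
# (hypothetical values; the real checks below are tailored to each survey question):
# In[ ]:
import pandas as pd
_toy = pd.Series(['recorded', 'Other', None])
print(_toy.map(lambda x: 0 if pd.isnull(x) else 1 if x == 'Other' else 2).tolist())  # [2, 1, 0]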
# In[2]:
import pandas as pd
import numpy as np
# In[7]:
pd.options.display.max_rows = 300
pd.options.display.max_columns = 300
pd.options.display.max_colwidth = 300
# In[289]:
data_orig = pd.read_hdf('../data/wsup/tidy/data_tidied.h5', key='main')
# In[290]:
name_changes = dict([line.strip().split('\t') for line in open('../data/name_changes.txt')])
# In[291]:
data = data_orig.rename(columns=name_changes)
# In[292]:
drops = [line.strip() for line in open('../data/drop2.txt')]
drops = data.columns.intersection(drops)
data.drop(drops, 1, inplace=True)
# In[293]:
data.shape
# In[294]:
drops = [d for d in data.columns if not (d.startswith('bool') or d.startswith('cat') or d.startswith('str') or
d.startswith('num') or d.startswith('id') or d.startswith('date'))]
drops
data.drop(drops, 1, inplace=True)
# In[295]:
data.head(10).T
# In[85]:
results = pd.DataFrame(index = data.index)
# ## Plot id
# In[347]:
data[['id_new_plot_id', 'id_plot', 'str_plot_id', 'bool_plot_id']].head()
# In[87]:
results['Plot_id'] = data[['id_new_plot_id', 'id_plot', 'str_plot_id']].apply(
lambda x: 2 if x[0] != 'None' and x[1]==x[2] else 1 if (x[0]!= 'None') else 0, axis=1)
# In[88]:
results['Plot_id'].value_counts()
# In[346]:
data.loc[results.Plot_id == 2, ['id_new_plot_id', 'id_plot', 'str_plot_id']].head()
# ## Property type
# In[91]:
p = 'Property_type'
cols = ['cat_property','cat_property_other']
# In[96]:
results[p] = data[cols].apply(
lambda x: 2 if not ('Other' in str(x[0])) or pd.isnull(x[0]) else 1 if pd.notnull(x[1]) else 0, axis=1)
# In[97]:
results[p].value_counts()
# In[100]:
data.loc[results[p] == 2, cols]
# In[102]:
data.loc[results[p] == 2, cols[0]].value_counts()
# ## Property zone
# In[104]:
p = 'Property_zone'
cols = [ 'cat_zone', 'cat_zone_other', 'cat_zone_section',
'cat_zone_section_other', 'str_zone_name']
# In[105]:
data[cols]
# In[109]:
results[p] = data[cols].apply(
lambda x: 2 if not ('Other' in str(x[0])) or pd.isnull(x[0]) else 1 if x.notnull().sum()>1 else 0, axis=1)
# In[110]:
results[p].value_counts()
# In[111]:
data.loc[results[p] == 0, cols]
# In[112]:
data.loc[results[p] == 2, cols[0]].value_counts()
# In[ ]:
# ## Toilets
# - only relevant for residential
# - suspicious if more than 1 toilet per person or no toilets
# - 0 if info unknown
# In[296]:
data['num_toilets_per_person'] = data['num_toilets_all'] / data['num_ppl'].map(lambda x: np.NaN if x==0 else x)
# In[297]:
p = 'Toilets_total'
cols = [ 'num_toilets_all', 'num_toilets_per_person', 'cat_property']
# In[298]:
data.loc[data['num_toilets_per_person'].notnull(), 'num_toilets_per_person'].hist(bins=50)
# In[299]:
data[cols]
# In[300]:
results[p] = data[cols].apply(
lambda x: np.NaN if x[2] != 'Residential Plot' else
2 if x[0]>0 and x[1]>0 and x[1] <=1 else
1 if (x[0]==0) or (x[1]>1) else
0, axis=1)
# In[301]:
results[p].value_counts()
# In[302]:
data.loc[results[p] == 0, cols]
# In[303]:
data.loc[results[p] == 2, cols[0]].value_counts()
# In[ ]:
# ## Water
# In[119]:
p = 'Water_collection'
cols = [ 'cat_water', 'cat_water_other']
# In[128]:
data[cols]
# In[130]:
results[p] = data[cols].apply(
lambda x: 2 if 'Other' not in str(x[0]) and pd.notnull(x[0]) else 1 if pd.notnull(x[1]) else 0, axis=1)
# In[131]:
results[p].value_counts()
# In[132]:
data.loc[results[p] == 0, cols]
# In[133]:
data.loc[results[p] == 2, cols[0]].value_counts()
# ## Respondent
# In[146]:
p = 'Respondent_type'
cols = [ 'cat_responder_type']
# In[147]:
data[cols]
# In[152]:
results[p] = data[cols].apply(
lambda x: 2 if x[0]=='Landlord' else 1 if x[0] in ['Caretaker', 'Tenant'] else 0, axis=1)
# In[153]:
results[p].value_counts()
# In[154]:
data.loc[results[p] == 0, cols]
# In[155]:
data.loc[results[p] == 2, cols[0]].value_counts()
# # GPS information
# In[159]:
data.columns
# In[161]:
p = 'GPS_presence'
cols = ['str_gps_lat']
# In[162]:
data[cols]
# In[163]:
results[p] = data[cols].apply(
lambda x: 2 if pd.notnull(x[0]) else 0, axis=1)
# In[164]:
results[p].value_counts()
# In[165]:
data.loc[results[p] == 0, cols]
# In[166]:
data.loc[results[p] == 2, cols[0]].value_counts()
# # Number of users and num of ppl
# In[168]:
p = 'People_numbers_consistency'
cols = ['num_ppl', 'num_c_m', 'num_c_f', 'num_a_m',
'num_a_f']
# In[169]:
data[cols]
# In[170]:
results[p] = data[cols].apply(
lambda x: 2 if x[1:].sum()==x[0] else 1 if abs(x[1:].sum()-x[0]) < 2 else 0, axis=1)
# In[171]:
results[p].value_counts()
# In[172]:
data.loc[results[p] == 0, cols]
# In[173]:
data.loc[results[p] == 2, cols[0]].value_counts()
# ## People household - not relevant if not residential
# In[304]:
import numpy as np
# In[305]:
data['ppl_per_household'] = data['num_ppl'] / data['num_hhs']
# In[306]:
p = 'People_household'
cols = ['num_ppl', 'num_hhs', 'cat_property', 'ppl_per_household']
# In[307]:
ax = data['ppl_per_household'].hist(bins=100)
ax.set_yscale('log')
ax.set_xscale('log')
# In[308]:
data.loc[(data.cat_property == 'Residential Plot') & (data.ppl_per_household > 20),cols]
# In[309]:
results[p] = data[cols].apply(
lambda x: np.NaN if x[2] != 'Residential Plot' else
2 if x[0] > x[1] and x[3] <=20 else 1 if x.isnull().sum()==0 else 0, axis=1)
# In[310]:
data.columns
# In[311]:
data.loc[results[p] == 2, cols]
# In[312]:
results[p].value_counts()
# In[180]:
data.loc[results[p] == 2, cols]
# ## Solid wastes
# In[244]:
p = 'Solid waste'
cols = [ 'cat_waste', 'cat_waste_other']
# In[245]:
data[cols].head()
# In[246]:
results[p] = data[cols].apply(
lambda x: 2 if 'Other' not in str(x[0]) and | pd.notnull(x[0]) | pandas.notnull |
import pandas as pd
import instances.dinamizators.dinamizators as din
import math
def simplest_test():
'''
Test if the dinamizators are running
'''
df = (
pd.read_pickle('./instances/analysis/df_requests.zip')
.reset_index()
)
din.dinamize_as_berbeglia(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5,
60)
din.dinamize_as_pureza_laporte(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.pickup_lower_tw,
df.pickup_upper_tw,
0)
din.dinamize_as_pankratz(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5)
din.dinamize_as_fabri_recht(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_lower_tw,
df.delivery_upper_tw)
def test_calculate_travel_time():
pickup_location_x_coord = -1
pickup_location_y_coord = -1
delivery_location_x_coord = 1
delivery_location_y_coord = 1
expected_travel_time = math.ceil(math.sqrt(2) + math.sqrt(2))
calculated_travel_time = (
din.calculate_travel_time(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord)
)
assert (expected_travel_time == calculated_travel_time)
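# A parametrised variant of the same check (illustrative sketch; it assumes pytest is the
# test runner used for this module and reuses din.calculate_travel_time as above):
import pytest
@pytest.mark.parametrize('x1, y1, x2, y2, expected', [
    (-1, -1, 1, 1, 3),
    (0, 0, 3, 4, 5),
])
def test_calculate_travel_time_parametrized(x1, y1, x2, y2, expected):
    assert din.calculate_travel_time(x1, y1, x2, y2) == expected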
def test_series_elementwise_max():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_max = pd.Series([3, 2, 3])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all()
def test_dataframe_elementwise_max():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
expected_max = pd.DataFrame([[3, 2, 3], [3, 2, 3]])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all().all()
def test_series_elementwise_min():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_min = pd.Series([1, 2, 1])
calculated_min = din.elementwise_min(x, y)
assert (expected_min == calculated_min).all()
def test_dataframe_elementwise_min():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = | pd.DataFrame([[3, 2, 1], [1, 2, 3]]) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: utilities.ipynb (unless otherwise specified).
__all__ = ['make_codes', 'make_data', 'get_rows', 'extract_codes', 'Info', 'memory', 'listify', 'reverse_dict',
'del_dot', 'del_zero', 'expand_hyphen', 'expand_star', 'expand_colon', 'expand_regex', 'expand_code',
'expand_columns', 'format_codes', 'insert_external', 'unique', 'count_codes', 'lookup_codes', 'get_codes']
# Cell
import re
import numpy as np
import pandas as pd
from functools import singledispatch
# Cell
def make_codes(n=100, letters=26, numbers=100, seed=False):
"""
Generate a dataframe with a column of random codes
Args:
letters (int): The number of different letters to use
numbers (int): The number of different numbers to use
Returns
A dataframe with a column with one or more codes in the rows
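Examples
>>> df = make_codes(n=5, letters=3, numbers=10, seed=True)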
"""
# each code is assumed to consist of a letter and a number
alphabet = list('abcdefghijklmnopqrstuvwxyz')
letters=alphabet[:letters+1]
# make random numbers same if seed is specified
if seed:
np.random.seed(0)
# determine the number of codes to be drawn for each event
n_codes=np.random.negative_binomial(1, p=0.3, size=n)
# avoid zero (all events have to have at least one code)
n_codes=n_codes+1
# for each event, randomly generate a the number of codes specified by n_codes
codes=[]
for i in n_codes:
diag = [np.random.choice(letters).upper()+
str(int(np.random.uniform(low=1, high=numbers)))
for num in range(i)]
code_string=','.join(diag)
codes.append(code_string)
# create a dataframe based on the list
df=pd.DataFrame(codes)
df.columns=['code']
return df
# Cell
def make_data(n=100, letters=26, numbers=100, seed=False, expand=False,
columns=['pid', 'gender', 'birth_date', 'date', 'region', 'codes']):
"""
Generate a dataframe with a column of random codes
Args:
letters (int): The number of different letters to use
numbers (int): The number of different numbers to use
Returns
A dataframe with a column with one or more codes in the rows
Examples
>>>df = make_data(n=100, letters=5, numbers=5, seed=True)
"""
if seed:
np.random.seed(seed=seed)
pid = range(n)
df_person=pd.DataFrame(index = pid)
#female = np.random.binomial(1, 0.5, size =n)
gender = np.random.choice(['male', 'female'], size=n)
region = np.random.choice(['north', 'south', 'east', 'west'], size=n)
birth_year = np.random.randint(1920, 2019, size=n)
birth_month = np.random.randint(1,13, size=n)
birth_day = np.random.randint(1,28, size=n) # ok, I know!
events_per_year = np.random.poisson(1, size=n)
years = 2020 - birth_year
events = years * events_per_year
events = np.where(events==0,1,events)
events = events.astype(int)
all_codes=[]
codes = [all_codes.extend(make_codes(n=n, letters=letters,
numbers=numbers,
seed=seed)['code'].tolist())
for n in events]
days_alive = (2020 - birth_year) *365
days_and_events = zip(days_alive.tolist(), events.tolist())
all_days=[]
days_after_birth = [all_days.extend(np.random.randint(0, max_day, size=n)) for max_day, n in days_and_events]
pid_and_events = zip(list(pid), events.tolist())
all_pids=[]
pids = [all_pids.extend([p+1]*e) for p, e in pid_and_events]
df_events = pd.DataFrame(index=all_pids)
df_events['codes'] = all_codes
df_events['days_after'] = all_days
#df_person['female'] = female
df_person['gender'] = gender
df_person['region'] = region
df_person['year'] = birth_year
df_person['month'] = birth_month
df_person['day'] = birth_day
df = df_events.merge(df_person, left_index=True, right_index=True)
df['birth_date'] = pd.to_datetime(df[['year', 'month', 'day']])
df['date'] = df['birth_date'] + pd.to_timedelta(df.days_after, unit='d')
del df['month']
del df['day']
del df['days_after']
df['pid'] = df.index
df.index.name = 'pid_index'
df=df.sort_values(['pid', 'date'])
df=df[columns]
if expand:
splitted = df.codes.str.split(',', expand=True).add_prefix('code_').fillna(np.nan)
df = pd.concat([df,splitted], axis=1)
del df['codes']
# include deaths too?
return df
# Cell
# mark rows that contain certain codes in one or more colums
def get_rows(df, codes, cols=None, sep=None, pid='pid', all_codes=None, fix=True, info=None):
"""
Make a boolean series that is true for all rows that contain the codes
Args
df (dataframe or series): The dataframe with codes
codes (str, list, set, dict): codes to be counted
cols (str or list): list of columns to search in
sep (str): The symbol that seperates the codes if there are multiple codes in a cell
pid (str): The name of the column with the personal identifier
>>>get_rows(df=df, codes='F3', cols='codes', sep=',')
"""
# check if evaluated previously
info, rows = memory(info=info, func = 'get_rows', expr=codes)
if rows:
return rows
# check if codes and columns need to be expanded (needed if they use notation)
if fix:
# resolve column notation first; the code expansion below relies on the resolved columns
cols = expand_columns(cols, all_columns=list(df.columns), info=info)
all_codes = sorted(unique(df=df, cols=cols, sep=sep))
codes = expand_code(codes, all_codes=all_codes)
# codes and cols should be lists
codes = listify(codes)
cols = listify(cols)
# approach depends on whether we have multi-value cells or not
# if sep exist, then have multi-value cells
if sep:
# have multi-valued cells
# note: this assumes the sep is a regex word delimiter
codes = [rf'\b{code}\b' for code in codes]
codes_regex = '|'.join(codes)
# starting point: no codes have been found
# needed since otherwise the function might return None if no codes exist
rows = pd.Series(False, index=df.index)
# loop over all columns and mark when a code exist
for col in cols:
rows=rows | df[col].str.contains(codes_regex, na=False)
# if not multi valued cells
else:
mask = df[cols].isin(codes)
rows = mask.any(axis=1)
return rows
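# Usage sketch (illustrative, using the docstring example above): with multi-code cells,
# get_rows(df=df, codes='F3', cols='codes', sep=',') returns a boolean mask that can be
# used directly for filtering, e.g. df[get_rows(df=df, codes='F3', cols='codes', sep=',')].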
# Cell
def extract_codes(df, codes, cols=None, sep=None, new_sep=',', na_rep='',
prefix=None, merge=False, out='bool', fix=True,
series=True, group=False, all_codes=None, info=None):
"""
Produce one or more columns with only selected codes
Args:
df (dataframe): Dataframe with events
codes (string, list or dict): The codes for the disease
cols (string, list): Name of columns where codes are located
sep (string, default: None): Separator between codes in same cell (if exist)
(If None, the function will infer the separator)
pid (str, default: 'pid'): Name of column with the personal identification number
codebook (list): User specified list of all possible or allowed codes
merge (bool): Content of all columns is merged to one series # only if out='text'?
group (bool): Star an other notation remain a single group, not split into individual codes
out (string, ['text', 'category', 'bool' or 'int']): Datatype of output column(s)
Notes:
Can produce a set of dummy columns for codes and code groups.
Can also produce a merged column with only extracted codes.
Accept star notation.
Also accepts both single value columns and columns with compound codes and separators
Repeat events in same rows are only extracted once
Example:
to create three dummy columns, based on codes in icdmain column:
>>> extract_codes(df=df,
>>> codes={'fracture' : 'S72*', 'cd': 'K50*', 'uc': 'K51*'},
>>> cols=['icdmain', 'icdbi'],
>>> merge=False,
>>> out='text')
extract_codes(df=df, codes={'b':['A1','F3'], 'c':'c*'}, cols='codes', sep=',', merge = False)
extract_codes(df=df, codes={'b':['A1','F3'], 'c':'C*'}, cols='codes', sep=',', merge = False)
extract_codes(df=df, codes=['A1','F3', 'C*'], cols='codes', sep=',', merge = False)
extract_codes(df=df, codes='C*', cols='codes', sep=',', merge = False)
nb: problem with extract rows if dataframe is empty (none of the requested codes)
"""
if isinstance(df, pd.Series):
df=df.to_frame()
cols=list(df.columns)
if not cols:
cols=list(df.columns)
if fix:
cols=expand_columns(cols, all_columns=list(df.columns))
all_codes = unique(df=df, cols=cols, sep=sep)
if isinstance(codes, str):
codes=listify(codes)
if (isinstance(codes, list)) and (not merge):
codes = expand_code(codes, all_codes=all_codes, info=info)
codes = {code:code for code in codes}
if (isinstance(codes, list)) and (merge):
codes = {str(tuple(codes)):codes}
codes = expand_code(codes, all_codes=all_codes, info=info)
print('after fix', cols, codes)
subset = pd.DataFrame(index=df.index)
for k, v in codes.items():
if v:
rows = get_rows(df=df, codes=v, cols=cols, sep=sep, all_codes=all_codes, fix=False)
else:
rows=False
if out == 'bool':
subset[k] = rows
elif out == 'int':
subset[k] = rows.astype(int)
elif out == 'category':
subset.loc[rows, k] = k
subset[k] = subset[k].astype('category')
else:
subset[k] = na_rep
subset.loc[rows, k] = k
if (merge) and (out == 'bool'):
subset = subset.astype(int).astype(str)
new_codes = list(subset.columns)
if (merge) and (len(codes) > 1):
headline = ', '.join(new_codes)
merged = subset.iloc[:, 0].str.cat(subset.iloc[:, 1:].values, sep=new_sep,
na_rep=na_rep)  # note: .T.values seemed to work previously, but it should not have
merged = merged.str.strip(',')
subset = merged
subset.name = headline
if out == 'category':
subset = subset.astype('category')
# return a series if only one code is asked for (and also if merged?)
if series and (len(codes) == 1):
subset = subset.squeeze()
return subset
# Cell
class Info():
"""
A class to store information about the data and results from analysis
"""
def __init__(self):
self.evaluated = {}
# Cell
def memory(info, func, expr):
"""
checks if the function has been called with the same argument previously and
if so, returns the same results instead of running the function again
args:
info (Info): object that stores results of previously evaluated expressions
func (str): name of the calling function
expr: the expression (e.g. codes) the calling function received
returns:
a tuple (info, cached_result_or_None)
"""
rows=None
if info:
if func in info.evaluated:
if expr in info.evaluated[func]:
rows = info.evaluated[func][expr]
else:
info.evaluated[func] = {}
else:
info = Info()
info.evaluated[func] = {}
return info, rows
# Cell
def listify(string_or_list):
"""
return a list if the input is a string, if not: returns the input as it was
Args:
string_or_list (str or any):
Returns:
A list if the input is a string, if not: returns the input as it was
Note:
- allows user to use a string as an argument instead of single lists
- cols='icd10' is allowed instead of cols=['icd10']
- cols='icd10' is transformed to cols=['icd10'] by this function
"""
if isinstance(string_or_list, str):
string_or_list = [string_or_list]
return string_or_list
# Cell
def reverse_dict(dikt):
new_dict = {}
for name, codelist in dikt.items():
codelist = listify(codelist)
new_dict.update({code: name for code in codelist})
return new_dict
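# Illustrative example (hypothetical disease-code mapping):
# reverse_dict({'uc': ['K51', 'K52'], 'cd': 'K50'}) -> {'K51': 'uc', 'K52': 'uc', 'K50': 'cd'}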
# Cell
def del_dot(code):
if isinstance(code, str):
return code.replace('.','')
else:
codes = [c.replace('.','') for c in code]
return codes
def del_zero(code, left=True, right=False):
    # accept a single string or a list of strings
    codes = [code] if isinstance(code, str) else list(code)
    if left:
        codes = [c.lstrip('0') for c in codes]
    if right:
        codes = [c.rstrip('0') for c in codes]
    if isinstance(code, str):
        codes = codes[0]
    return codes
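# Cell
# Hedged usage sketch (added for illustration; not part of the original module):
_dot_example = del_dot('K50.1')    # -> 'K501'
_zero_example = del_zero('007')    # -> '7' (leading zeros stripped by default)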
# Cell
# function to expand a string like 'K51.2-K53.8' to a list of codes
# Need regex to extract the number component of the input string
# The singledispach decorator enables us to have the same name, but use
# different functions depending on the datatype of the first argument.
#
# In our case we want one function to deal with a single string input, and
# another to handle a list of strings. It could all be handled in a single
# function using nested if, but singledispatch makes it less messy and more fun!
# Here is the main function; it is just the name and an error message if the
# argument does not fit any of the inputs that will be allowed
@singledispatch
def expand_hyphen(expr):
"""
Expands codes expression(s) that have hyphens to list of all codes
Args:
code (str or list of str): String or list of strings to be expanded
Returns:
List of strings
Examples:
expand_hyphen('C00-C26')
expand_hyphen('b01.1*-b09.9*')
expand_hyphen('n02.2-n02.7')
expand_hyphen('c00*-c260')
expand_hyphen('b01-b09')
expand_hyphen('b001.1*-b009.9*')
expand_hyphen(['b001.1*-b009.9*', 'c11-c15'])
Note:
Unequal number of decimals in start and end code is problematic.
Example: C26.0-C27.11 will not work since the meaning is not obvious:
Is the step size 0.01? In which case C27.1 will not be included, while
        C27.10 will be (and trailing zeros can be important in codes)
"""
raise ValueError('The argument must be a string or a list')
# register the function to be used if the input is a string
@expand_hyphen.register(str)
def _(expr):
# return immediately if nothing to expand
if '-' not in expr:
return [expr]
lower, upper = expr.split('-')
lower=lower.strip()
# identify the numeric component of the code
    lower_str = re.search(r"\d*\.\d+|\d+", lower).group()
    upper_str = re.search(r"\d*\.\d+|\d+", upper).group()
# note: what about european decimal notation?
# also note: what if multiple groups K50.1J8.4-etc
lower_num = int(lower_str.replace('.',''))
upper_num = int(upper_str.replace('.','')) +1
if upper_num<lower_num:
raise ValueError('The start code cannot have a higher number than the end code')
# remember length in case of leading zeros
length = len(lower_str)
nums = range(lower_num, upper_num)
# must use integers in a loop, not floats
# which also means that we must multiply and divide to get decimal back
# and take care of leading and trailing zeros that may disappear
if '.' in lower_str:
lower_decimals = len(lower_str.split('.')[1])
upper_decimals = len(upper_str.split('.')[1])
if lower_decimals==upper_decimals:
multiplier = 10**lower_decimals
codes = [lower.replace(lower_str, format(num /multiplier, f'.{lower_decimals}f').zfill(length)) for num in nums]
# special case: allow k1.1-k1.123, but not k.1-k2.123 the last is ambigious: should it list k2.0 only 2.00?
elif (lower_decimals<upper_decimals) & (upper_str.split('.')[0]==lower_str.split('.')[0]):
from_decimal = int(lower_str.split('.')[1])
to_decimal = int(upper_str.split('.')[1]) +1
nums = range(from_decimal, to_decimal)
decimal_str = '.'+lower.split('.')[1]
codes = [lower.replace(decimal_str, '.'+str(num)) for num in nums]
else:
raise ValueError('The start code and the end code do not have the same number of decimals')
else:
codes = [lower.replace(lower_str, str(num).zfill(length)) for num in nums]
return codes
# register the function to be used if if the input is a list of strings
@expand_hyphen.register(list)
def _(expr):
extended = []
for word in expr:
extended.extend(expand_hyphen(word))
return extended
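# Cell
# Hedged usage sketch (added for illustration): hyphen notation expands to the full range
_hyphen_simple = expand_hyphen('b01-b04')         # -> ['b01', 'b02', 'b03', 'b04']
_hyphen_decimal = expand_hyphen('n02.2-n02.4')    # -> ['n02.2', 'n02.3', 'n02.4']
_hyphen_mixed = expand_hyphen(['c01-c03', 'd2'])  # -> ['c01', 'c02', 'c03', 'd2']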
# Cell
# A function to expand a string with star notation (K50*)
# to list of all codes starting with K50
@singledispatch
def expand_star(code, all_codes=None):
"""
Expand expressions with star notation to a list of all values with the specified pattern
Args:
expr (str or list): Expression (or list of expressions) to be expanded
all_codes (list) : A list of all codes
Examples:
expand_star('K50*', all_codes=icd9)
expand_star('K*5', all_codes=icd9)
expand_star('*5', all_codes=icd9)
"""
raise ValueError('The argument must be a string or a list')
@expand_star.register(str)
def _(code, all_codes=None):
# return immediately if there is nothing to expand
if '*' not in code:
return [code]
start_str, end_str = code.split('*')
    if start_str and end_str:
        codes = {code for code in all_codes if (code.startswith(start_str) and code.endswith(end_str))}
    elif start_str:
        codes = {code for code in all_codes if code.startswith(start_str)}
    elif end_str:
        codes = {code for code in all_codes if code.endswith(end_str)}
    else:
        # a bare '*' matches every code
        codes = set(all_codes)
return sorted(list(codes))
@expand_star.register(list)
def _(code, all_codes=None):
expanded=[]
for star_code in code:
new_codes = expand_star(star_code, all_codes=all_codes)
expanded.extend(new_codes)
# uniqify in case some overlap
expanded = list(set(expanded))
return sorted(expanded)
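# Cell
# Hedged usage sketch (added for illustration; '_star_codes' is a made-up code list):
_star_codes = ['K500', 'K501', 'K509', 'K519', 'C20']
_prefix_hits = expand_star('K50*', all_codes=_star_codes)  # -> ['K500', 'K501', 'K509']
_suffix_hits = expand_star('*9', all_codes=_star_codes)    # -> ['K509', 'K519']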
# Cell
# function to get all codes in a list between the specified start and end code
# Example: Get all codes between K40:L52
@singledispatch
def expand_colon(code, all_codes=None):
raise ValueError('The argument must be a string or a list')
@expand_colon.register(str)
def _(code, all_codes=None):
"""
Expand expressions with colon notation to a list of complete code names
code (str or list): Expression (or list of expressions) to be expanded
all_codes (list or array) : The list to slice from
Examples
K50:K52
K50.5:K52.19
A3.0:A9.3
Note: This is different from hyphen and star notation because it can handle
different code lengths and different number of decimals
"""
if ':' not in code:
return [code]
startstr, endstr = code.split(':')
# remove spaces
startstr = startstr.strip()
endstr =endstr.strip()
# find start and end position
startpos = all_codes.index(startstr)
endpos = all_codes.index(endstr) + 1
# slice list
    expanded = all_codes[startpos:endpos]
return expanded
@expand_colon.register(list)
def _(code, all_codes=None, regex=False):
expanded=[]
for cod in code:
new_codes = expand_colon(cod, all_codes=all_codes)
expanded.extend(new_codes)
return expanded
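# Cell
# Hedged usage sketch (added for illustration): colon notation slices an ordered code list
_ordered_codes = ['K50', 'K51', 'K52', 'K53', 'K54']
_colon_hits = expand_colon('K51:K53', all_codes=_ordered_codes)  # -> ['K51', 'K52', 'K53']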
# Cell
# Return all elements in a list that fits a regex pattern
@singledispatch
def expand_regex(code, all_codes):
raise ValueError('The argument must be a string or a list of strings')
@expand_regex.register(str)
def _(code, all_codes=None):
code_regex = re.compile(code)
expanded = {code for code in all_codes if code_regex.match(code)}
# uniqify
expanded = list(set(expanded))
return expanded
@expand_regex.register(list)
def _(code, all_codes):
expanded=[]
for cod in code:
new_codes = expand_regex(cod, all_codes=all_codes)
expanded.extend(new_codes)
# uniqify in case some overlap
expanded = sorted(list(set(expanded)))
return expanded
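# Cell
# Hedged usage sketch (added for illustration): regex expansion against a code list
_regex_hits = expand_regex('K5[01]', all_codes=['K50', 'K51', 'K52', 'C50'])  # -> ['K50', 'K51'] (order not guaranteed)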
# Cell
@singledispatch
def expand_code(code, all_codes=None,
hyphen=True, star=True, colon=True, regex=False,
drop_dot=False, drop_leading_zero=False,
sort_unique=True, info=None):
raise ValueError('The argument must be a string or a list of strings')
@expand_code.register(str)
def _(code, all_codes=None,
hyphen=True, star=True, colon=True, regex=False,
drop_dot=False, drop_leading_zero=False,
sort_unique=True, info=None):
#validating input
if (not regex) and (':' in code) and (('-' in code) or ('*' in code)):
raise ValueError('Notation using colon must start from and end in specific codes, not codes using star or hyphen')
if regex:
codes = expand_regex(code, all_codes=all_codes)
return codes
if drop_dot:
code = del_dot(code)
codes=[code]
if hyphen:
codes=expand_hyphen(code)
if star:
codes=expand_star(codes, all_codes=all_codes)
if colon:
codes=expand_colon(codes, all_codes=all_codes)
if sort_unique:
codes = sorted(list(set(codes)))
return codes
@expand_code.register(list)
def _(code, all_codes=None, hyphen=True, star=True, colon=True, regex=False,
drop_dot=False, drop_leading_zero=False,
sort_unique=True, info=None):
expanded=[]
for cod in code:
new_codes = expand_code(cod, all_codes=all_codes, hyphen=hyphen, star=star, colon=colon, regex=regex, drop_dot=drop_dot, drop_leading_zero=drop_leading_zero)
expanded.extend(new_codes)
# uniqify in case some overlap
expanded = list(set(expanded))
return sorted(expanded)
# a dict of names and codes (in a string or a list)
@expand_code.register(dict)
def _(code, all_codes=None, hyphen=True, star=True, colon=True, regex=False,
drop_dot=False, drop_leading_zero=False,
sort_unique=True, info=None):
expanded={}
for name, cod in code.items():
if isinstance(cod,str):
cod = [cod]
expanded_codes=[]
for co in cod:
new_codes = expand_code(co, all_codes=all_codes, hyphen=hyphen, star=star, colon=colon, regex=regex, drop_dot=drop_dot, drop_leading_zero=drop_leading_zero)
expanded_codes.extend(new_codes)
expanded[name] = list(set(expanded_codes))
return expanded
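# Cell
# Hedged usage sketch (added for illustration; '_all_codes' is a made-up code list):
_all_codes = ['K500', 'K501', 'K510', 'K511', 'C20']
_named = expand_code({'crohn': 'K50*', 'uc': 'K510-K511'}, all_codes=_all_codes)
# -> {'crohn': ['K500', 'K501'], 'uc': ['K510', 'K511']} (value order not guaranteed)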
# Cell
@singledispatch
def expand_columns(expr, all_columns=None, df=None, star=True,
hyphen=True, colon=True, regex=None, info=None):
"""
Expand columns with special notation to their full column names
"""
raise ValueError('Must be str or list of str')
@expand_columns.register(str)
def _(expr, all_columns=None, df=None, star=True,
hyphen=True, colon=True, regex=None, info=None):
notations = '* - :'.split()
# return immediately if not needed
if not any(symbol in expr for symbol in notations):
return [expr]
    # get a list of columns if it is only implicitly defined by the df
    # warning: may deprecate this, require explicit all_columns
    if (df is not None) and (not all_columns):
        all_columns = list(df.columns)
    if regex:
        cols = [col for col in all_columns if re.match(regex, col)]
    else:
        cols = [expr]
        if hyphen:
            cols = expand_hyphen(expr)
        if star:
            cols = expand_star(cols, all_codes=all_columns)
        if colon:
            cols = expand_colon(cols, all_codes=all_columns)
    return cols
@expand_columns.register(list)
def _(expr, all_columns=None, df=None, star=True,
      hyphen=True, colon=True, regex=None, info=None):
    expanded_cols = []
    for col in expr:
        new_columns = expand_columns(col, all_columns=all_columns, df=df, star=star,
                                     hyphen=hyphen, colon=colon, regex=regex, info=info)
        expanded_cols.extend(new_columns)
    return expanded_cols
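# Cell
# Hedged usage sketch (added for illustration): star notation also expands column names
_col_names = ['icdmain', 'icdbi', 'atc1', 'atc2', 'pid']
_icd_cols = expand_columns('icd*', all_columns=_col_names)  # -> ['icdbi', 'icdmain']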
# Cell
def format_codes(codes, merge=True):
"""
Makes sure that the codes has the desired format: a dict with strings as
keys (name) and a list of codes as values)
    Background: For several functions the user is allowed to use a string
    when there is only one element in the list, a list when there is
    no code replacement or aggregation, or a dict. To avoid (even more) mess
    the input is standardised as soon as possible in a function.
Examples:
codes = '4AB02'
codes='4AB*'
codes = ['4AB02', '4AB04', '4AC*']
codes = ['4AB02', '4AB04']
codes = {'tumor' : 'a4*', 'diabetes': ['d3*', 'd5-d9']}
codes = 'S72*'
codes = ['K50*', 'K51*']
_format_codes(codes, merge=False)
TODO: test for correctness of input, not just reformat (is the key a str?)
"""
codes = _listify(codes)
    # treatment of pure lists depends on whether special notation should be treated as one merged group or as separate codes
    # example: counting Z51* could mean "count the total number of codes starting with Z51" OR be shorthand for "count all codes starting with Z51 separately"
    # the "merge" option enables the user to switch between these two interpretations
if isinstance(codes, list):
if merge:
codes = {'_'.join(codes): codes}
else:
codes = {code: [code] for code in codes}
elif isinstance(codes, dict):
new_codes = {}
for name, codelist in codes.items():
if isinstance(codelist, str):
codelist = [codelist]
new_codes[name] = codelist
codes = new_codes
return codes
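# Cell
# Hedged usage sketch (added for illustration; assumes the module's _listify helper
# behaves like listify above):
_single = format_codes('4AB02')                     # -> {'4AB02': ['4AB02']}
_merged = format_codes(['K50', 'K51'], merge=True)  # -> {'K50_K51': ['K50', 'K51']}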
# Cell
def _expand_regex(expr, full_list):
exprs = _listify(expr)
expanded = []
    if isinstance(full_list, pd.Series):
        unique_series = full_list
elif isinstance(full_list, list):
unique_series = pd.Series(full_list)
elif isinstance(full_list, set):
unique_series = pd.Series(list(full_list))
for expr in exprs:
match = unique_series.str.contains(expr)
expanded.extend(unique_series[match])
return expanded
# Cell
def insert_external(expr):
"""
Replaces variables prefixed with @ in the expression with the
value of the variable from the global namespace
Example:
x=['4AB02', '4AB04', '4AB06']
expr = '@x before 4AB02'
insert_external(expr)
"""
externals = [word.strip('@') for word in expr.split() if word.startswith('@')]
for external in externals:
tmp = globals()[external]
expr = expr.replace(f'@{external} ', f'{tmp} ')
return expr
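# Cell
# Hedged usage sketch (added for illustration; '_ext_codes' is a made-up module-level variable):
_ext_codes = ['4AB02', '4AB04']
_expr = insert_external('@_ext_codes before 4AB02')  # -> "['4AB02', '4AB04'] before 4AB02"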
# Cell
# A function to identify all unique values in one or more columns
# with one or multiple codes in each cell
def unique(df, cols=None, sep=None, all_str=True, info=None):
"""
Lists unique values from one or more columns
sep (str): separator if cells have multiple values
all_str (bool): converts all values to strings
unique(df=df, cols='inpatient', sep=',')
"""
# if no column(s) are specified, find unique values in whole dataframe
    if cols is None:
cols=list(df.columns)
cols = listify(cols)
# multiple values with separator in cells
if sep:
all_unique=set()
for col in cols:
            new_unique = set(df[col].str.cat(sep=sep).split(sep))
all_unique.update(new_unique)
# single valued cells
else:
all_unique = pd.unique(df[cols].values.ravel('K'))
# if need to make sure all elements are strings without surrounding spaces
if all_str:
all_unique=[str(value).strip() for value in all_unique]
return all_unique
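# Cell
# Hedged usage sketch (added for illustration; '_toy' is a made-up dataframe):
_toy = pd.DataFrame({'icd': ['K50,K51', 'K51', 'C20,K50']})
_toy_codes = unique(df=_toy, cols='icd', sep=',')  # -> ['C20', 'K50', 'K51'] (order not guaranteed)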
# Cell
def count_codes(df, codes=None, cols=None, sep=None, normalize=False,
ascending=False, fix=True, merge=False, group=False, dropna=True, all_codes=None, info=None):
"""
Count frequency of values in multiple columns and columns with seperators
Args:
codes (str, list of str, dict): codes to be counted. If None, all codes will be counted
cols (str or list of str): columns where codes are
sep (str): separator if multiple codes in cells
        merge (bool): If False, each code will be counted separately
If True (default), each code with special notation will be counted together
strip (bool): strip space before and after code before counting
ignore_case (bool): determine if codes with same characters,
but different cases should be the same
normalize (bool): If True, outputs percentages and not absolute numbers
dropna (bool): If True, codes not listed are not counted and ignored when calculating percentages
allows
- star notation in codes and columns
- values in cells with multiple valules can be separated (if sep is defined)
- replacement and aggregation to larger groups (when code is a dict)
example
        To count the number of steroid events (codes starting with H2) and use of
antibiotics (codes starting with xx) in all columns where the column names
starts with "atc":
count_codes(df=df,
                    codes={'steroids' : 'H2*', 'antibiotics' : ['AI3*']},
cols='atc*',
sep=',')
more examples
-------------
df.count_codes(codes='K51*', cols='icd', sep=',')
count_codes(df, codes='K51*', cols='icdm', sep=',', group=True)
count_codes(df, codes='Z51*', cols=['icd', 'icdbi'], sep=',')
count_codes(df, codes='Z51*', cols=['icdmain', 'icdbi'], sep=',', group=True)
count_codes(df, codes={'radiation': 'Z51*'}, cols=['icd'], sep=',')
count_codes(df, codes={'radiation': 'Z51*'}, cols=['icdmain', 'icdbi'], sep=',')
count_codes(df, codes={'crohns': 'K50*', 'uc':'K51*'}, cols=['icdmain', 'icdbi'], sep=',')
count_codes(df, codes={'crohns': 'K50*', 'uc':'K51*'}, cols=['icdmain', 'icdbi'], sep=',', dropna=True)
count_codes(df, codes={'crohns': 'K50*', 'uc':'K51*'}, cols=['icdmain', 'icdbi'], sep=',', dropna=False)
count_codes(df, codes={'crohns': 'K50*', 'uc':'K51*'}, cols=['icdmain', 'icdbi'], sep=',', dropna=False, group=False)
count_codes(df, codes=['K50*', 'K51*'], cols=['icd'], sep=',', dropna=False, group=True, merge=False)
count_codes(df, codes=['K50*', 'K51*'], cols=['icdmain', 'icdbi'], sep=',', dropna=False, group=False, merge=False)
count_codes(df, codes=['K50*', 'K51*'], cols=['icdmain', 'icdbi'], sep=',', dropna=False, group=False, merge=True)
count_codes(df, codes=['K50*', 'K51*'], cols=['icdmain', 'icdbi'], sep=',', dropna=True, group=True, merge=True)
    # group False, merge True, for list = wrong ...
count_codes(df, codes=['K50*', 'K51*'], cols=['icdmain', 'icdbi'], sep=',', dropna=True, group=False, merge=False)
"""
# preliminary formating
if isinstance(df, pd.Series):
df=df.to_frame()
cols=list(df.columns)
# maybe df[pid]=df.index
if not codes:
codes=unique(df=df, cols=cols, sep=sep, info=info)
all_codes = list(set(codes))
cols=expand_columns(cols, all_columns=list(df.columns))
if not all_codes:
all_codes = unique(df=df, cols=cols, sep=sep)
old_codes=codes
codes = expand_code(codes, all_codes=all_codes, info=info)
if isinstance(old_codes, str) and (merge):
codes = {old_codes:codes}
elif isinstance(old_codes, str) and not (merge):
codes = {code:code for code in codes}
elif isinstance(old_codes, list) and (merge):
codes = {str(old_codes): codes}
elif isinstance(old_codes, list) and not (merge):
codes = {code: code for code in codes}
only_codes=[]
for name, code in codes.items():
code=listify(code)
only_codes.extend(code)
# prevent duplicates
only_codes=list(set(only_codes))
sub = df
if dropna:
rows = get_rows(df=sub, codes=only_codes, cols=cols, sep=sep, all_codes=all_codes)
sub = sub[rows]
if sep:
count=Counter()
for col in cols:
codes_in_col = [code.strip() for code in sub[col].str.cat(sep=sep).split(sep)]
count.update(codes_in_col)
code_count= | pd.Series(count) | pandas.Series |
import pandas as pd
import numpy as np
import sklearn.feature_selection
import sklearn.preprocessing
import sklearn.model_selection
import mlr
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import statistics
# sort feature columns by the length of their names (shortest first)
def sort_by_feature_name(df):
df =df.T
a = []
for i in df.T.columns:
a.append(len(i))
df["len"] = a
df_sorted = df.sort_values(["len"])
df_sorted = df_sorted.drop(["len"],axis=1)
return df_sorted.T
# Remove feature correlations, using Pearson correlation, based on the variable threshold
def remove_correlation(dataset, threshold):
col_corr = set() # Set of all the names of deleted columns
corr_matrix = dataset.corr().abs()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] >= threshold:
colname = corr_matrix.columns[i] # getting the name of column
col_corr.add(colname)
if colname in dataset.columns:
del dataset[colname] # deleting the column from the dataset
return dataset
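# Hedged usage sketch (added for illustration; '_demo' is made-up data, not part of the pipeline):
# a column that duplicates another is removed once their absolute correlation exceeds the threshold
_demo = pd.DataFrame(np.random.rand(100, 5), columns=list('abcde'))
_demo['f'] = _demo['a'] * 2
_demo_reduced = remove_correlation(_demo, 0.9)   # column 'f' is dropped (corr(a, f) == 1)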
# SEP is the standard error of prediction (test set). SEE is the error for training
def sep(yt,yp):
return np.sqrt(mean_squared_error(yt, yp))
def run_MLREM(df2, name, dependent_variable, up_to_beta=200, screen_variance=False):
df=df2.copy()
# Separating independent and dependent variables x and y
y = df[dependent_variable].to_numpy().reshape(-1,1)
x = df.drop(dependent_variable,axis=1)
x_sorted=sort_by_feature_name(x)
x_pastvar = x_sorted.copy()
if screen_variance:
selector = sklearn.feature_selection.VarianceThreshold(threshold=0.01)
selector.fit(x_sorted)
x_pastvar=x_sorted.T[selector.get_support()].T
x_remcorr = remove_correlation(x_pastvar,0.9)
y_scaller = sklearn.preprocessing.StandardScaler()
x_scaller = sklearn.preprocessing.StandardScaler()
ys_scalled = y_scaller.fit_transform(y)
xs_scalled = x_scaller.fit_transform(x_remcorr)
ind = x_remcorr.columns
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(xs_scalled, ys_scalled, test_size=0.3)
df_X_test = | pd.DataFrame(X_test, columns=ind) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# ## Comparing the whole TextKernel dataset to the sample from which skills are extracted
#
# Compare the sample of TK job adverts used in the skills with all the TK data.
# %%
# cd ../../..
# %%
from skills_taxonomy_v2.getters.s3_data import load_s3_data, get_s3_data_paths
from collections import Counter
from datetime import datetime
from tqdm import tqdm
import pandas as pd
import boto3
import matplotlib.pyplot as plt
bucket_name = "skills-taxonomy-v2"
s3 = boto3.resource("s3")
# %% [markdown]
# ## Load all TK counts
# %%
all_tk_year_month_counts = pd.read_csv(
"outputs/tk_analysis/all_tk_year_month_counts.csv"
)
all_tk_count_region_df = pd.read_csv("outputs/tk_analysis/all_tk_regions_counts.csv")
all_tk_count_subregion_df = pd.read_csv(
"outputs/tk_analysis/all_tk_subregions_counts.csv"
)
# %% [markdown]
# ## Load sentences that went into skills
# %%
file_name_date = "2021.08.31"
sentence_clusters = load_s3_data(
s3,
bucket_name,
f"outputs/skills_extraction/extracted_skills/{file_name_date}_sentences_data.json",
)
sentence_clusters = pd.DataFrame(sentence_clusters)
sentence_clusters.head(2)
# %%
skill_job_ads = set(sentence_clusters["job id"].unique())
# %% [markdown]
# ## How many job adverts
# %%
total_number_jobadvs = 62892486 # Found in 'TextKernel Data.ipynb'
# %%
skill_num_jobadvs = len(skill_job_ads)
# %%
print(f"Sentences that make up skills are from {skill_num_jobadvs} job adverts")
print(
f"This is {round(skill_num_jobadvs*100/total_number_jobadvs,2)}% of all job adverts"
)
# %% [markdown]
# ## Dates
# 'date', 'expiration_date'
# %%
tk_dates = []
for file_name in tqdm(range(0, 13)):
file_date_dict = load_s3_data(
s3, bucket_name, f"outputs/tk_data_analysis/metadata_date/{file_name}.json"
)
tk_dates.extend(
[f[0] for job_id, f in file_date_dict.items() if job_id in skill_job_ads]
)
print(len(tk_dates))
# %%
pd.DataFrame(tk_dates).to_csv("outputs/tk_analysis/tk_dates.csv")
# %%
skill_tk_dates = pd.DataFrame(tk_dates)
skill_tk_dates["date"] = pd.to_datetime(skill_tk_dates[0], format="%Y-%m-%d")
# %%
num_dates_null = sum(pd.isnull(skill_tk_dates[0]))
num_dates_null
# %%
print(len(skill_tk_dates))
skill_tk_dates = skill_tk_dates[pd.notnull(skill_tk_dates[0])]
print(len(skill_tk_dates))
# %%
skill_tk_dates["year"] = pd.DatetimeIndex(skill_tk_dates[0]).year
skill_tk_dates["month"] = pd.DatetimeIndex(skill_tk_dates[0]).month
# %%
year_month_counts = skill_tk_dates.groupby(["year", "month"])[0].count()
# %%
year_month_counts = year_month_counts.sort_index().reset_index()
year_month_counts["year/month"] = (
year_month_counts[["year", "month"]].astype(str).agg("/".join, axis=1)
)
year_month_counts
# %%
# Add a row for the None date counts and save
pd.concat(
[
year_month_counts,
pd.DataFrame(
[{"year": None, "month": None, 0: num_dates_null, "year/month": None}]
),
],
ignore_index=True,
axis=0,
).to_csv("outputs/tk_analysis/skills_tk_year_month_counts.csv")
# %% [markdown]
# ### Get proportions for side by side comparison
# Not using no-date data
# %%
year_month_counts["Proportion"] = year_month_counts[0] / (year_month_counts[0].sum())
# %%
all_tk_year_month_counts_nonull = all_tk_year_month_counts[
pd.notnull(all_tk_year_month_counts["year"])
]
all_tk_year_month_counts_nonull["Proportion"] = all_tk_year_month_counts_nonull["0"] / (
all_tk_year_month_counts_nonull["0"].sum()
)
# %% [markdown]
# ### Plot dates with all TK dates
# %%
nesta_orange = [255 / 255, 90 / 255, 0]
nesta_purple = [155 / 255, 0, 195 / 255]
nesta_grey = [165 / 255, 148 / 255, 130 / 255]
# %%
ax = all_tk_year_month_counts_nonull.plot(
x="year/month",
y="Proportion",
xlabel="Date of job advert",
ylabel="Proportion of job adverts",
c=nesta_grey,
)
ax = year_month_counts.plot(
x="year/month",
y="Proportion",
xlabel="Date of job advert",
ylabel="Proportion of job adverts",
c=nesta_orange,
ax=ax,
)
ax.legend(["All TK job adverts", "TK job adverts in sample"])
ax.figure.savefig(
"outputs/tk_analysis/job_ad_date_sample_comparison.pdf", bbox_inches="tight"
)
# %%
all_tk_year_month_counts_nonull["year"] = all_tk_year_month_counts_nonull[
"year"
].astype(int)
# %%
fig = plt.figure(figsize=(7, 4)) # Create matplotlib figure
ax = fig.add_subplot(111) # Create matplotlib axes
width = 0.3
pd.DataFrame(
all_tk_year_month_counts_nonull.groupby("year")["0"].sum()
/ sum(all_tk_year_month_counts_nonull["0"])
).plot.bar(color=nesta_grey, ax=ax, width=width, position=1)
pd.DataFrame(
year_month_counts.groupby("year")[0].sum() / sum(year_month_counts[0])
).plot.bar(color=nesta_orange, ax=ax, width=width, position=0)
ax.set_ylabel("Proportion of job adverts")
ax.set_xlabel("Year of job advert")
ax.legend(["All TK job adverts", "TK job adverts in sample"], loc="upper right")
ax.figure.savefig(
"outputs/tk_analysis/job_ad_year_sample_comparison.pdf", bbox_inches="tight"
)
# %% [markdown]
# ## Location
# %%
tk_region = []
tk_subregion = []
for file_name in tqdm(range(0, 13)):
file_dict = load_s3_data(
s3, bucket_name, f"outputs/tk_data_analysis/metadata_location/{file_name}.json"
)
tk_region.extend(
[f[2] for job_id, f in file_dict.items() if f and job_id in skill_job_ads]
)
tk_subregion.extend(
[f[3] for job_id, f in file_dict.items() if f and job_id in skill_job_ads]
)
print(len(tk_region))
print(len(tk_subregion))
# %%
print(len(set(tk_region)))
print(len(set(tk_subregion)))
# %%
count_region_df = pd.DataFrame.from_dict(Counter(tk_region), orient="index")
count_region_df
# %%
count_region_df.to_csv("outputs/tk_analysis/skills_tk_regions_counts.csv")
# %%
print(count_region_df[0].sum())
count_region_df = count_region_df[pd.notnull(count_region_df.index)]
print(count_region_df[0].sum())
# %%
count_region_df
# %%
all_tk_count_region_df_nonull = all_tk_count_region_df[
pd.notnull(all_tk_count_region_df["Unnamed: 0"])
]
all_tk_count_region_df_nonull.index = all_tk_count_region_df_nonull["Unnamed: 0"]
all_tk_count_region_df_nonull
# %%
fig = plt.figure(figsize=(7, 4)) # Create matplotlib figure
ax = fig.add_subplot(111) # Create matplotlib axes
width = 0.3
ax = (
pd.DataFrame(
all_tk_count_region_df_nonull["0"] / sum(all_tk_count_region_df_nonull["0"])
)
.sort_values(by=["0"], ascending=False)
.plot.bar(color=nesta_grey, legend=False, ax=ax, width=width, position=1)
)
ax = pd.DataFrame(count_region_df[0] / sum(count_region_df[0])).plot.bar(
color=nesta_orange, legend=False, ax=ax, width=width, position=0
)
ax.set_ylabel("Proportion of job adverts")
ax.set_xlabel("Region of job advert")
ax.legend(["All TK job adverts", "TK job adverts in sample"], loc="upper right")
ax.figure.savefig(
"outputs/tk_analysis/job_ad_region_sample_comparison.pdf", bbox_inches="tight"
)
# %%
count_subregion_df = pd.DataFrame.from_dict(Counter(tk_subregion), orient="index")
# %%
count_subregion_df.to_csv("outputs/tk_analysis/skills_tk_subregions_counts.csv")
# %%
print(count_subregion_df[0].sum())
count_subregion_df = count_subregion_df[ | pd.notnull(count_subregion_df.index) | pandas.notnull |
""" this is a mixture of the best #free twitter sentimentanalysis modules on github.
i took the most usable codes and mixed them into one because all of them
where for a linguistical search not usable and did not show a retweet or a full tweet
no output as csv, only few informations of a tweet, switching language
or even to compare linguistic features in tweets of two different langauges and etc. etc ...
special and many many thanks to https://github.com/vprusso/youtube_tutorials who showed on his
page a tutorial on how to do a sentimentanalysis with python
i did this for users with not much skills and linguistical background to help them to get a corpus of twitterdata
and to show them how to do a comparison between sentence based vs document based sentimentanalysis
credits to all AVAILABLE FREE AND SIMPLE sentimentanalysis programms (dec. 2019) on github.
many thanks to everybody and of course to github for making this exchange and usage possible!
cemre koc (Goethe University, Frankfurt) Python3.7
"""
from textblob import TextBlob  # sentiment lexicon (for German use the textblob_de package: import textblob_de)
import re  # module for regular expressions
from tweepy import API  # Twitter API module; for more info see the tweepy docs
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy  # used for the different features of this program
import sys  # only if wanted
import csv  # only if wanted (see rest of program)
import pandas as pd  # pandas for illustration
import authentification  # access to Twitter
import numpy as np  # collection of tweets via numpy
import matplotlib.pyplot as plt  # if needed (see below for plotting)
import numpy
#output screen (if you use pycharm for full screen view)
#only if needed
pd.set_option('display.max_rows', 1000000000000)
pd.set_option('display.max_columns', 1000000000)
| pd.set_option('display.width', 100000000000) | pandas.set_option |
# Downstream: crime prediction (also applicable to Fire calls prediction)
# two modes:
# --- No exogenous data
# --- Oracle network
# The model consists of a 3D CNN network that uses
# historical ST (spatio-temporal) data to predict the next time step.
# Users can choose not to use any features, or
# to use an arbitrary number of 1D or 2D features.
import pandas as pd
import numpy as np
import sys
import os
import os.path
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
from os import getcwd
from os.path import join
import matplotlib.pyplot as plt
import argparse
import time
import datetime
from datetime import timedelta
from utils import datetime_utils
import evaluation
import crime_oracle
from matplotlib import pyplot as plt
HEIGHT = 32
WIDTH = 20
TIMESTEPS = 56
BIKE_CHANNEL = 1 # number of channels in historical ST
NUM_2D_FEA = 15
NUM_1D_FEA = 3
BATCH_SIZE = 32
TRAINING_STEPS = 200
LEARNING_RATE = 0.005
fea_list = ['pop','normalized_pop', 'bi_caucasian','bi_age', 'bi_high_incm','bi_edu_univ', 'bi_nocar_hh',
'white_pop','age65_under', 'edu_uni']
class train:
def __init__(self, raw_df, demo_raw,
train_start_time = '2014-02-01',train_end_time = '2018-10-31',
test_start_time = '2018-11-01 00:00:00', test_end_time = '2019-05-01 23:00:00' ):
self.raw_df = raw_df
self.demo_raw = demo_raw
self.train_start_time = train_start_time
self.train_end_time = train_end_time
# set train/test set
self.test_start_time = test_start_time
self.test_end_time = test_end_time
self.window = datetime.timedelta(hours=24 * 7)
self.step = datetime.timedelta(hours=3)
self.predict_start_time = datetime_utils.str_to_datetime(self.test_start_time) + self.window
self.predict_end_time = datetime_utils.str_to_datetime(self.test_end_time)
self.actual_end_time = self.predict_end_time - self.window
self.train_df = raw_df[self.train_start_time: self.train_end_time]
self.test_df = raw_df[self.test_start_time: self.test_end_time]
self.grid_list = list(raw_df)
def generate_binary_demo_attr(self, intersect_pos_set,
bi_caucasian_th = 65.7384, age65_th = 13.01,
hh_incm_hi_th = 41.76, edu_uni_th =53.48, no_car_hh_th = 16.94 ):
self.demo_raw['bi_caucasian'] = [0]*len(self.demo_raw)
self.demo_raw['bi_age'] = [0]*len(self.demo_raw)
self.demo_raw['bi_high_incm'] = [0]*len(self.demo_raw)
self.demo_raw['bi_edu_univ'] = [0]*len(self.demo_raw)
self.demo_raw['bi_nocar_hh'] = [0]*len(self.demo_raw)
self.demo_raw['mask'] = [0]*len(self.demo_raw)
# should ignore cells that have no demo features
for idx, row in self.demo_raw.iterrows():
if row['pos'] not in intersect_pos_set:
continue
self.demo_raw.loc[idx,'mask'] = 1
# caucasian = 1
if row['white_pop'] >= bi_caucasian_th:
self.demo_raw.loc[idx,'bi_caucasian'] = 1
else:
self.demo_raw.loc[idx,'bi_caucasian'] = -1
# young = 1
if row['age65'] < age65_th:
self.demo_raw.loc[idx,'bi_age'] = 1
else:
self.demo_raw.loc[idx,'bi_age'] = -1
# high_income = 1
if row['hh_incm_hi'] > hh_incm_hi_th:
self.demo_raw.loc[idx,'bi_high_incm'] = 1
else:
self.demo_raw.loc[idx,'bi_high_incm'] = -1
# edu_univ = 1
if row['edu_uni'] > edu_uni_th:
self.demo_raw.loc[idx,'bi_edu_univ'] = 1
else:
self.demo_raw.loc[idx,'bi_edu_univ'] = -1
# more car = 1
if row['no_car_hh'] < no_car_hh_th:
self.demo_raw.loc[idx,'bi_nocar_hh'] = 1
else:
self.demo_raw.loc[idx,'bi_nocar_hh'] = -1
self.demo_raw['normalized_pop'] = self.demo_raw['pop'] / self.demo_raw['pop'].sum()
self.demo_raw['age65_under'] = 100- self.demo_raw['age65']
# make mask for demo data
def demo_mask(self):
rawdata_list = list()
temp_image = [[0 for i in range(HEIGHT)] for j in range(WIDTH)]
series = self.demo_raw['mask']
for i in range(len(self.demo_raw)):
r = self.demo_raw['row'][i]
c = self.demo_raw['col'][i]
temp_image[r][c] = series[i]
temp_arr = np.array(temp_image)
temp_arr = np.rot90(temp_arr)
rawdata_list.append(temp_arr)
rawdata_arr = np.array(rawdata_list)
rawdata_arr = np.moveaxis(rawdata_arr, 0, -1)
return rawdata_arr # mask_arr
def df_to_tensor(self):
rawdata_list = list()
for idx, dfrow in self.raw_df.iterrows():
temp_image = [[0 for i in range(HEIGHT)] for j in range(WIDTH)]
for col in list(self.raw_df ):
r = int(col.split('_')[0])
c = int(col.split('_')[1])
temp_image[r][c] = dfrow[col]
temp_arr = np.array(temp_image)
temp_arr = np.rot90(temp_arr)
rawdata_list.append(temp_arr)
rawdata_arr = np.array(rawdata_list)
return rawdata_arr
    # demographic data to array: [HEIGHT, WIDTH, len(fea_list)]
def demodata_to_tensor(self, demo_arr = None):
        if demo_arr is None:
            raw_df = self.demo_raw.fillna(0)
        else:
            raw_df = demo_arr.fillna(0)
rawdata_list = list()
for fea in fea_list:
temp_image = [[0 for i in range(HEIGHT)] for j in range(WIDTH)]
series = raw_df[fea]
for i in range(len(raw_df)):
r = raw_df['row'][i]
c = raw_df['col'][i]
temp_image[r][c] = series[i]
temp_arr = np.array(temp_image)
temp_arr = np.rot90(temp_arr)
rawdata_list.append(temp_arr)
        # stack to (len(fea_list), HEIGHT, WIDTH); the feature axis is moved last below
rawdata_arr = np.array(rawdata_list)
rawdata_arr = np.moveaxis(rawdata_arr, 0, -1)
return rawdata_arr
def selected_demo_to_tensor(self):
fea_to_include = fea_list.copy()
fea_to_include.extend(['pos', 'row','col'])
selected_demo_df = self.demo_raw[fea_to_include]
demo_arr = self.demodata_to_tensor(selected_demo_df)
return demo_arr
def generate_fixlen_timeseries(self, rawdata_arr):
raw_seq_list = list()
arr_shape = rawdata_arr.shape
for i in range(0, arr_shape[0] - (TIMESTEPS + 1)+1):
start = i
end = i+ (TIMESTEPS + 1)
temp_seq = rawdata_arr[start: end]
raw_seq_list.append(temp_seq)
raw_seq_arr = np.array(raw_seq_list)
raw_seq_arr = np.swapaxes(raw_seq_arr,0,1)
return raw_seq_arr
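    # Added shape sketch (illustrative, based on the loop above): with TIMESTEPS = 56,
    # an input array of shape (T, 32, 20) yields (57, T - 56, 32, 20) after this method:
    # axis 0 holds the 56 history frames plus the frame to predict, axis 1 indexes the windows.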
# split train/test according to predefined timestamps
'''
return:
train_arr: e.g.:[(169, # of training examples, 30, 30)]
'''
def train_test_split(self,raw_seq_arr):
train_hours = datetime_utils.get_total_3hour_range(self.train_start_time, self.train_end_time)
train_arr = raw_seq_arr[:, :train_hours]
test_arr = raw_seq_arr[:, train_hours:]
return train_arr, test_arr
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-use_1d_fea', type=bool, default=False,
action="store", help = 'whether to use 1d features. If use this option, set to True. Otherwise, default False')
parser.add_argument('-use_2d_fea', type=bool, default=False,
action="store", help = 'whether to use 2d features')
parser.add_argument('-use_3d_fea', type=bool, default=False,
action="store", help = 'whether to use 3d features')
parser.add_argument('-s', '--suffix',
action="store", help = 'save path suffix', default = '')
parser.add_argument("-r","--resume_training", type=bool, default=False,
help="A boolean value whether or not to resume training from checkpoint")
parser.add_argument('-t', '--train_dir',
action="store", help = 'training dir containing checkpoints', default = '')
parser.add_argument('-c', '--checkpoint',
action="store", help = 'checkpoint path (resume training)', default = None)
parser.add_argument('-e', '--epoch', type=int,
action="store", help = 'epochs to train', default = 200)
parser.add_argument('-l', '--learning_rate', type=float,
action="store", help = 'epochs to train', default = 0.005)
return parser.parse_args()
def main():
args = parse_args()
use_1d_fea = bool(args.use_1d_fea)
use_2d_fea = bool(args.use_2d_fea)
use_3d_fea = bool(args.use_3d_fea)
suffix = args.suffix
# the following arguments for resuming training
resume_training = args.resume_training
train_dir = args.train_dir
checkpoint = args.checkpoint
epoch = args.epoch
learning_rate= args.learning_rate
if checkpoint is not None:
checkpoint = train_dir + checkpoint
print('pick up checkpoint: ', checkpoint)
print('load data for Seattle...')
globals()['TRAINING_STEPS'] = epoch
globals()['LEARNING_RATE'] = learning_rate
rawdata = pd.read_csv('../data_processing/3d_source_data/seattlecrime_grided_3-day_3-hour_20140101-20190505.csv', index_col = 0)
rawdata.index = pd.to_datetime(rawdata.index)
rawdata = rawdata.loc['2014-02-01 00:00:00': '2019-05-01 23:00:00']
# a set of region codes (e.g.: 10_10) that intersect with the city
intersect_pos = pd.read_csv('../auxillary_data/intersect_pos_32_20.csv')
intersect_pos_set = set(intersect_pos['0'].tolist())
# demographic data
# should use 2018 data
demo_raw = | pd.read_csv('../auxillary_data/whole_grid_32_20_demo_1000_intersect_geodf_2018_corrected.csv', index_col = 0) | pandas.read_csv |
# coding: utf-8
# In[37]:
import pandas as pd
from sklearn import preprocessing
import numpy as np
import os
import h5py
import json
import h5py
# In[17]:
distance_data_path = "data.csv"
hnsw_result_path = "/home/lab4/code/HNSW/KNN-Evaluate/hnsw_result1111.h5py"
test_file_path = "test_image_feature.csv"
train_file_path = "vect_itemid130k.h5py"
# ### Loading hnsw data
# In[18]:
f = h5py.File(hnsw_result_path)
hnsw_ids_result = np.array(f["itemID"])
# ### Loading testing data and sort() test_ids to match to result_hnsw_ids
#
# In[19]:
df = | pd.read_csv(test_file_path, sep="\t", converters={1: json.loads}) | pandas.read_csv |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging: process_order_nb called with log=True should fill the provided log record
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
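# Boolean entry/exit fixtures and direction-specific wrappers around Portfolio.from_signals used throughout TestFromSignals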
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
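# conflict_mode resolves bars where entry and exit are both True: ignore both, favor the entry, favor the exit,
# or (presumably) act on the side opposite to the current position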
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
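# With call_seq='auto' and cash sharing, calls within each row appear to be ordered so that
# positions are reduced before new ones are opened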
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
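# from_holding should behave like a single entry signal held for the whole period (no exits, no accumulation)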
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
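# Randomly generated signals (fixed seed) are checked against equivalent explicit entry/exit arrays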
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
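# Size fixtures and direction-specific wrappers around Portfolio.from_orders used throughout TestFromOrders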
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
def from_orders_longonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
def from_orders_shortonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_all(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
(2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
(2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
(9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
(3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
(9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
(6, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
(6, 4, 1, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 1, 0, 0, 0.0, 100.0, 2.0, 200.0, -np.inf, 0, 2, 2.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 2, 0, 0, 400.0, -100.0, 3.0, 100.0, np.nan, 0, 2, 3.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 3, 0, 0, 400.0, -100.0, 4.0, 0.0, np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 4, 0, 0, 0.0, 0.0, 5.0, 0.0, -np.inf, 0, 2, 5.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 1, 0, 200.0, 2.0, 0.0, 1),
(3, 3, 1, 200.0, 4.0, 0.0, 0), (4, 4, 1, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 3, 1, 100.0, 4.0, 0.0, 0),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_orders_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_target_shares(self):
record_arrays_close(
from_orders_all(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=75., size_type='targetshares',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
(8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
(4, 4, 0, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
(2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
(6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
(10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
(12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
(9, 4, 1, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
(3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
(2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
(6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
(10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
(12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
(14, 4, 2, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
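# With call_seq='auto' and cash sharing, rebalancing to the targets appears to be exact,
# so the resulting per-column holding values should equal target_hold_value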
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
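# Numba-compiled order functions: buy on even bars, sell on odd bars; the second variant also emits log records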
@njit
def order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col])
@njit
def log_order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col], log=True)
class TestFromOrderFunc:
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_one_column(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_multiple_columns(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
(6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_group_by(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_cash_sharing(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 1, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 1, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 2, 200.0, 2.0, 0.0, 1),
(4, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(4, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (5, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
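        # call_seq='auto' is rejected by from_order_func (see the raise above), so the
        # automatic sorting is reproduced manually: segment_prep_func_nb calls
        # sort_call_seq_nb to place sells before buys, and the resulting holdings must
        # match target_hold_value exactly.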
@njit
def segment_prep_func_nb(sc, target_hold_value):
order_size = np.copy(target_hold_value[sc.i, sc.from_col:sc.to_col])
order_size_type = np.full(sc.group_len, SizeType.TargetValue)
direction = np.full(sc.group_len, Direction.All)
order_value_out = np.empty(sc.group_len, dtype=np.float_)
sc.last_val_price[sc.from_col:sc.to_col] = sc.close[sc.i, sc.from_col:sc.to_col]
nb.sort_call_seq_nb(sc, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(oc, order_size, order_size_type, direction):
col_i = oc.call_seq_now[oc.call_idx]
return nb.create_order_nb(
size=order_size[col_i],
size_type=order_size_type[col_i],
price=oc.close[oc.i, col_i],
direction=direction[col_i]
)
portfolio = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
@njit
def target_val_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_val_order_func_nb(oc):
return nb.create_order_nb(size=50., size_type=SizeType.TargetValue, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_percent(self, test_row_wise):
@njit
def target_pct_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_pct_order_func_nb(oc):
return nb.create_order_nb(size=0.5, size_type=SizeType.TargetPercent, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
segment_prep_func_nb=target_pct_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(portfolio._init_cash) == np.ndarray
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def group_prep_func_nb(gc, call_i, group_lst):
call_i[0] += 1
group_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
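        # All callbacks share one counter, so the lists below record the global call
        # order: prep -> group prep -> segment prep -> order functions.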
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,)
)
assert call_i[0] == 28
assert list(sim_lst) == [1]
assert list(group_lst) == [2, 18]
assert list(segment_lst) == [3, 6, 9, 12, 15, 19, 21, 23, 25, 27]
assert list(order_lst) == [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 20, 22, 24, 26, 28]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, True],
[False, False],
[False, True],
[False, False],
[False, True],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask
)
assert call_i[0] == 8
assert list(sim_lst) == [1]
assert list(group_lst) == [2]
assert list(segment_lst) == [3, 5, 7]
assert list(order_lst) == [4, 6, 8]
def test_func_calls_row_wise(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def row_prep_func_nb(gc, call_i, row_lst):
call_i[0] += 1
row_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
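        # Row-wise simulation iterates rows before groups, so a row prep function takes
        # the place of the group prep function in the recorded call order.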
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
row_wise=True
)
assert call_i[0] == 31
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 8, 14, 20, 26]
assert list(segment_lst) == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]
assert list(order_lst) == [4, 5, 7, 10, 11, 13, 16, 17, 19, 22, 23, 25, 28, 29, 31]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask,
row_wise=True
)
assert call_i[0] == 14
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 5, 9]
assert list(segment_lst) == [3, 6, 10, 13]
assert list(order_lst) == [4, 7, 8, 11, 12, 14]
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #
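# Fixtures shared by the Portfolio attribute tests: a close price frame with NaNs to
# exercise missing-price handling, a small order series, and one trade direction per
# column.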
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'all']
group_by = pd.Index(['first', 'first', 'second'], name='group')
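# The same orders are simulated three times - ungrouped, grouped without cash sharing,
# and grouped with cash sharing - to compare per-column vs. per-group behaviour of the
# Portfolio attributes.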
portfolio = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D'
) # independent
portfolio_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D'
) # grouped
portfolio_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D'
) # shared
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
def test_orders(self):
record_arrays_close(
portfolio.orders.values,
np.array([
(0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
(6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
(8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
portfolio.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, np.nan, 100.0, 1.0, 0, 0, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.0, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 1, 0, 0, 100.0, 0.0, 2.0, 100.0, 0.1, 0, 0, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.69598, 0.1, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 2, 0, 0, 99.69598, 0.1, 3.0, 99.99598, -1.0, 0, 0, 3.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 3, 0, 0, 99.89001, 0.0, 4.0, 99.89001, -0.1, 0, 0, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 4, 0, 0, 99.89001, 0.0, 5.0, 99.89001, 1.0, 0, 0, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 94.68951, 1.0, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 0, 1, 1, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 1, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.8801, -1.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 2.0, 98.8801, 0.1, 0, 1, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 2, 1, 1, 100.97612, -1.1, np.nan, np.nan, -1.0, 0, 1, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 3, 1, 1, 100.97612, -1.1, 4.0, 96.57611999999999, -0.1, 0, 1, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.46808, -1.0, 0.1, 4.04, 0.10404000000000001, 0, 0, -1, 5),
(9, 4, 1, 1, 100.46808, -1.0, 5.0, 95.46808, 1.0, 0, 1, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 105.26858, -2.0, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 0, 2, 2, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 2, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.8799, 1.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 1, 2, 2, 98.8799, 1.0, 2.0, 100.8799, 0.1, 0, 2, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.57588000000001, 1.1, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 3.0, 101.87588000000001, -1.0, 0, 2, 3.0,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True, 101.41618000000001,
0.10000000000000009, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 4.0, 101.81618000000002,
-0.1, 0, 2, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True,
101.70822000000001, 0.0, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 4, 2, 2, 101.70822000000001, 0.0, np.nan, 101.70822000000001, 1.0, 0, 2, np.nan, 0.01, 0.1, 0.01,
1e-08, np.inf, 0.0, True, False, True, 101.70822000000001, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.logs.count(),
result
)
def test_trades(self):
record_arrays_close(
portfolio.trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.trades.count(),
result
)
def test_positions(self):
record_arrays_close(
portfolio.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
], dtype=position_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_fill_close(self):
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
def test_share_flow(self):
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.share_flow(),
result
)
def test_shares(self):
pd.testing.assert_frame_equal(
portfolio.shares(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.shares(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.shares(),
result
)
def test_pos_mask(self):
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(),
result
)
def test_pos_coverage(self):
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('pos_coverage')
)
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('pos_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
portfolio.cash_flow(short_cash=False),
pd.DataFrame(
np.array([
[0., -1.0999, -1.1201],
[-0.30402, -0.29998, -0.30402],
[0.19403, 0., 2.8403],
[0., 0.29996, 0.29204],
[-5.2005, -5.0995, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
portfolio.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
portfolio_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
portfolio.cash(short_cash=False),
pd.DataFrame(
np.array([
[100., 98.9001, 98.8799],
[99.69598, 98.60012, 98.57588],
[99.89001, 98.60012, 101.41618],
[99.89001, 98.90008, 101.70822],
[94.68951, 93.80058, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(),
result
)
def test_holding_value(self):
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., np.nan, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., np.nan, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[np.nan, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 0.01001, 0.],
[0., 0.02182537, 0.],
[0., np.nan, 0.],
[0., 0.03887266, 0.],
[0., 0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -0.01021449, 0.01001202],
[0.00200208, -0.02282155, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.0421496, 0.],
[0.05015573, -0.11933092, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.00505305, 0.01001202],
[0.00100052, -0.01120162, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.02052334, 0.],
[0.02503887, -0.05440679, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005003, 0.01001202],
[-0.01006684, 0.02183062],
[np.nan, 0.00294938],
[-0.02037095, 0.],
[-0.02564654, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0., -0.01001, 0.01001202],
[0.00200208, -0.02182537, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.03887266, 0.],
[0.05015573, -0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0050025, 0.01001202],
[0.00100052, -0.01095617, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.01971414, 0.],
[0.02503887, -0.04906757, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00495344, 0.01001202],
[-0.00984861, 0.02183062],
[np.nan, 0.00294938],
[-0.01957348, 0.],
[-0.02323332, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, np.nan, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, np.nan, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[np.nan, np.nan, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[np.nan, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[np.nan, np.nan, 9.33060570e-03],
[0.0, np.nan, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[np.nan, 9.33060570e-03],
[np.nan, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(),
result
)
def test_active_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, np.nan, 0.42740909],
[0., np.nan, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[np.nan, 0.42740909],
[np.nan, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.active_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(),
result
)
def test_market_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(),
result
)
def test_market_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(),
result
)
def test_total_market_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.0005995, -0.001201],
[-0.0066395, 0.0077588],
[-0.0066395, 0.0171618],
[-0.0066395, 0.0170822],
[-0.01372199, 0.0170822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0005995, -0.001201],
[-0.0005201, -0.0061194, 0.0077588],
[-0.00054995, -0.0061194, 0.0171618],
[-0.00054995, -0.0061194, 0.0170822],
[-0.00155245, -0.01218736, 0.0170822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(),
pd.Series(
np.array([-20.82791491, 10.2576347]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-66.19490297745766, -19.873024060759022]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-25.06639947, 12.34506527]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-11.058998255347488, -21.39151322377427, 10.257634695847853]),
index=price_na.columns
).rename('sharpe_ratio')
)
def test_stats(self):
pd.testing.assert_series_equal(
portfolio.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -1.1112299999999966,
-1.1112299999999966, 283.3333333333333, 66.66666666666667,
1.6451238489727062, 1.6451238489727062,
pd.Timedelta('3 days 08:00:00'), pd.Timedelta('3 days 08:00:00'),
1.3333333333333333, 33.333333333333336, -98.38058805880588,
-100.8038553855386, -99.59222172217225,
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 04:00:00'),
0.10827272727272726, 1.2350921335789007, -0.01041305691622876,
-7.373390156195147, 25.695952942372134, 5717.085878360386
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='stats_mean')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364,
-11.057783842772304, -9.75393669809172, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(required_return=0.1, risk_free=0.01),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364,
-188.9975847831419, -15.874008737030774, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(active_returns=True),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364, np.nan, np.nan, np.nan
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(incl_unrealized=True),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 2, 0.0, -3.9702970297029667,
-54.450495049504966, -29.210396039603964,
| pd.Timedelta('1 days 00:00:00') | pandas.Timedelta |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from os.path import join as pjoin
from cplvm import CPLVM
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
DATA_DIR = "../../data/mix_seq/data/nutlin/"
if __name__ == "__main__":
latent_dim_shared = 2
latent_dim_foreground = 2
X_fname = pjoin(DATA_DIR, "dmso_expt1.csv")
Y_fname = pjoin(DATA_DIR, "nutlin_expt1.csv")
X_mutation_fname = pjoin(DATA_DIR, "p53_mutations_dmso.csv")
Y_mutation_fname = pjoin(DATA_DIR, "p53_mutations_nutlin.csv")
p53_mutations_X = pd.read_csv(X_mutation_fname, index_col=0)
p53_mutations_X.tp53_mutation[
p53_mutations_X.tp53_mutation == "Hotspot"
] = "Mutated"
p53_mutations_X.tp53_mutation[
p53_mutations_X.tp53_mutation == "Other"
] = "Wild-type"
p53_mutations_Y = pd.read_csv(Y_mutation_fname, index_col=0)
p53_mutations_Y.tp53_mutation[
p53_mutations_Y.tp53_mutation == "Hotspot"
] = "Mutated"
p53_mutations_Y.tp53_mutation[
p53_mutations_Y.tp53_mutation == "Other"
] = "Wild-type"
# Read in data
X = pd.read_csv(X_fname, index_col=0)
Y = | pd.read_csv(Y_fname, index_col=0) | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
def prepare_titanic(test_size=0.3, random_state=123):
print('Download or read from disk.')
ds = tfds.load('titanic', split='train')
# Turn DataSet adapter into DataFrame
print('Convert to pandas.DataFrame')
X = []
y = []
for ex in tfds.as_numpy(ds):
x_i, y_i = ex['features'], ex['survived']
X.append(x_i)
y.append(y_i)
df_X = pd.DataFrame(X)
features = list(df_X.columns)
y = | pd.Series(y, name='survived') | pandas.Series |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import pandas as pd
import pytz
from pm4py.util.constants import CASE_CONCEPT_NAME
from pm4py.algo.filtering.common.timestamp.timestamp_common import get_dt_from_string
from pm4py.util.xes_constants import DEFAULT_TIMESTAMP_KEY
from pm4py.util.constants import PARAMETER_CONSTANT_TIMESTAMP_KEY, PARAMETER_CONSTANT_CASEID_KEY
from enum import Enum
from pm4py.util import exec_utils
from copy import copy
from typing import Optional, Dict, Any, Union, Tuple, List
import datetime
class Parameters(Enum):
TIMESTAMP_KEY = PARAMETER_CONSTANT_TIMESTAMP_KEY
CASE_ID_KEY = PARAMETER_CONSTANT_CASEID_KEY
def filter_traces_contained(df: pd.DataFrame, dt1: Union[str, datetime.datetime], dt2: Union[str, datetime.datetime], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> pd.DataFrame:
"""
Get traces that are contained in the given interval
Parameters
----------
df
Pandas dataframe
dt1
Lower bound to the interval (possibly expressed as string, but automatically converted)
dt2
Upper bound to the interval (possibly expressed as string, but automatically converted)
parameters
Possible parameters of the algorithm, including:
Parameters.TIMESTAMP_KEY -> Attribute to use as timestamp
Parameters.CASE_ID_KEY -> Column that contains the timestamp
Returns
----------
df
Filtered dataframe
"""
if parameters is None:
parameters = {}
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
dt1 = get_dt_from_string(dt1)
dt2 = get_dt_from_string(dt2)
dt1 = dt1.replace(tzinfo=pytz.utc)
dt2 = dt2.replace(tzinfo=pytz.utc)
dt1 = pd.to_datetime(dt1, utc=True)
dt2 = pd.to_datetime(dt2, utc=True)
grouped_df = df[[case_id_glue, timestamp_key]].groupby(df[case_id_glue])
first = grouped_df.first()
last = grouped_df.last()
last.columns = [str(col) + '_2' for col in last.columns]
stacked = pd.concat([first, last], axis=1)
stacked = stacked[stacked[timestamp_key] >= dt1]
stacked = stacked[stacked[timestamp_key + "_2"] <= dt2]
i1 = df.set_index(case_id_glue).index
i2 = stacked.set_index(case_id_glue).index
ret = df[i1.isin(i2)]
ret.attrs = copy(df.attrs) if hasattr(df, 'attrs') else {}
return ret
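# A minimal usage sketch (comments only, not part of the original module): assuming an
# event log already loaded as a pandas DataFrame `df`, traces fully contained in March
# 2011 could be kept with something like:
#
#   filtered = filter_traces_contained(
#       df, "2011-03-01 00:00:00", "2011-03-31 23:59:59",
#       parameters={Parameters.TIMESTAMP_KEY: "time:timestamp",
#                   Parameters.CASE_ID_KEY: "case:concept:name"})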
def filter_traces_intersecting(df: pd.DataFrame, dt1: Union[str, datetime.datetime], dt2: Union[str, datetime.datetime], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> pd.DataFrame:
"""
Filter traces intersecting the given interval
Parameters
----------
df
Pandas dataframe
dt1
Lower bound to the interval (possibly expressed as string, but automatically converted)
dt2
Upper bound to the interval (possibly expressed as string, but automatically converted)
parameters
Possible parameters of the algorithm, including:
Parameters.TIMESTAMP_KEY -> Attribute to use as timestamp
Parameters.CASE_ID_KEY -> Column that contains the timestamp
Returns
----------
df
Filtered dataframe
"""
if parameters is None:
parameters = {}
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
dt1 = get_dt_from_string(dt1)
dt2 = get_dt_from_string(dt2)
dt1 = dt1.replace(tzinfo=pytz.utc)
dt2 = dt2.replace(tzinfo=pytz.utc)
dt1 = | pd.to_datetime(dt1, utc=True) | pandas.to_datetime |
import numpy as np
import re as re
from scipy import stats
import gnc
import netCDF4 as nc
import copy as pcopy
import pdb
import pb
import pandas as pa
def Dic_DataFrame_to_Excel(excel_file,dic_df,multisheet=False,keyname=True,na_rep='', cols=None, header=True, index=True, index_label=None):
"""
Write a dictionary of pandas.DataFrame data to an excel file with keys as excel sheets.
"""
def _df_member(obj):
"""
        Check that obj is a pandas DataFrame; raise TypeError otherwise.
"""
if isinstance(obj,pa.core.frame.DataFrame):
return True
else:
raise TypeError('this is not a DataFrame object')
for key,dfm in dic_df.items():
_df_member(dfm)
excel_wob= | pa.ExcelWriter(excel_file) | pandas.ExcelWriter |
# coding: utf-8
# In[2]:
#Spam filtering
import numpy as np
import pandas as pd
import os
import email
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.naive_bayes import MultinomialNB
from sklearn import datasets, linear_model
from sklearn.metrics import confusion_matrix
from bs4 import BeautifulSoup
import re
# removing extraneous characters
def data_from_file():
target = []
index = []
rows = []
#importing non-spam folder's file
flist = os.listdir("spamassasin\\ham")
for f in flist:
ifile=open("spamassasin\\ham\\" + f, encoding = "ISO-8859-1")
rawtext=""
rawtext = file_read(ifile)
msg = email.message_from_string(rawtext)
subject = str(msg['Subject'])
body = email_parse_subject_body(rawtext)
subjectandbody=subject + "\n" + body
rows.append({'text': subjectandbody, 'class': 0})
index.append(f)
#importing spam folder's file
flist = os.listdir("spamassasin\\spam")
for f in flist:
ifile=open("spamassasin\\spam\\" + f, encoding = "ISO-8859-1")
rawtext=""
rawtext = file_read(ifile)
msg = email.message_from_string(rawtext)
subject = str(msg['Subject'])
body = email_parse_subject_body(rawtext)
subjectandbody = subject + "\n" + body
rows.append({'text': subjectandbody, 'class': 1})
index.append(f)
data_frame_from_email_and_class = | pd.DataFrame(rows, index=index) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PACKAGES NEEDED TO RUN THE OPERATIONAL MODEL
import numpy as np
import pandas as pd
import datetime as dt
import json
import wmf.wmf as wmf
import hydroeval
import glob
import SHop
import hidrologia
import os
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context('notebook', font_scale=1.13)
#FORMATO
# font
import matplotlib
matplotlib.use('Agg')
import matplotlib.font_manager as fm
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
font_dirs = ['/home/socastillogi/jupyter/fuentes/AvenirLTStd-Book']
font_files = font_manager.findSystemFonts(fontpaths=font_dirs)
font_list = font_manager.createFontList(font_files)
font_manager.fontManager.ttflist.extend(font_list)
matplotlib.rcParams['font.family'] = 'Avenir LT Std'
matplotlib.rcParams['font.size']=11
import pylab as pl
#axes
# pl.rc('axes',labelcolor='#4f4f4f')
# pl.rc('axes',linewidth=1.5)
# pl.rc('axes',edgecolor='#bdb9b6')
pl.rc('text',color= '#4f4f4f')
#avoid warnings
import warnings
warnings.filterwarnings('ignore')
############################################################################################ DATE
date_ev = pd.to_datetime('2021-03-09 18:00')
############################################################################################ ARGUMENTS
print (dt.datetime.now())
ruta_proj = '/home/socastillogi/jupyter/SH_op/SHop_E260_90m_1d/SHop/project_files/'
configfile=ruta_proj+'inputs/configfile_SHop_E260_90m_1d.md'
save_hist = False  # keep False for the first runs
dateformat_starts = '%Y-%m-%d'
date = pd.to_datetime(date_ev.strftime(dateformat_starts))
ConfigList= SHop.get_rutesList(configfile)
############################################################################################ EXECUTION
ConfigList= SHop.get_rutesList(configfile)
# abrir simubasin
path_ncbasin = SHop.get_ruta(ConfigList,'ruta_proj')+SHop.get_ruta(ConfigList,'ruta_nc')
cu = wmf.SimuBasin(rute=path_ncbasin)
# settings to run the model.
SHop.set_modelsettings(ConfigList)
warming_steps = 0  # simulation steps; they depend on dt.
warming_window ='%ss'%int(wmf.models.dt * warming_steps) # always in seconds
dateformat_starts = '%Y-%m-%d'
starts = ['%ss'%(90*24*60*60)]#,'%ss'%(90*24*60*60)] #60d back
starts_names = ['90d']#,'1d'] # starts and starts_names must have the same length.
window_end = '0s' #none
print ('######')
print ('Start DAILY execution: %s'%dt.datetime.now())
#dates
date = (pd.to_datetime( | pd.to_datetime(date) | pandas.to_datetime |
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import itertools
# def plot_confusion_matrix(cm, classes,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Blues):
# """
# This function prints and plots the confusion matrix.
# Normalization can be applied by setting `normalize=True`.
# """
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
# print(cm)
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
# plt.yticks(tick_marks, classes)
# fmt = '.2f' if normalize else 'd'
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
# plt.tight_layout()
# plt.ylabel('True label')
# plt.xlabel('Predicted label')
data = pd.read_csv("../data/output/test.csv")
data["code"] = | pd.factorize(data['age']) | pandas.factorize |
from keyword import kwlist
import re
from pandas import DataFrame, Series, Index, MultiIndex
from typing import Union, List, Dict, Iterable
def reindex_series(series: Series, target_series: Series, source_levels: List[int] = None,
target_levels: List[int] = None, fill_value: Union[int, float] = None) -> Series:
# Make shallow copies of the source and target series in case their indexes need to be changed
series = series.copy(deep=False)
target_series = target_series.copy(deep=False)
if series.index.nlevels > 1 and source_levels is not None:
arrays = [series.index.get_level_values(level) for level in source_levels]
series.index = | MultiIndex.from_arrays(arrays) | pandas.MultiIndex.from_arrays |
### Report: Rebalance & Grid ####
# import necessary packages
import ccxt
import json
import numpy as np
import pandas as pd
import time
import decimal
from datetime import datetime
import pytz
import csv
import sys
# Api and secret
api_key = ""
api_secret = ""
subaccount = ""
# Set your account name (the account name you want shown in the report file)
account_name = "Report_Rebalance"
# Exchange Details
exchange = ccxt.ftx({
'apiKey': api_key,
'secret': api_secret,
'enableRateLimit': True}
)
exchange.headers = {'FTX-SUBACCOUNT': subaccount,}
post_only = True # Maker or Taker (place orders as MAKER only? True = yes)
# Global Variable Settings
token_name_lst = ["SOL"] # --- change ----# Name of Token (enter the token symbols you want)
pair_lst = ["SOL/USD"] # --- change ----# Pair (enter the pairs to rebalance, e.g. for XRP use ["XRP/USD"])
pair = "SOL/USD" # --- change ----# Pair (enter the pair to rebalance, e.g. for XRP use "XRP/USD")
pair_dict = {token_name_lst[i]: pair_lst[i] for i in range(len(token_name_lst))}
# file system
tradelog_file = "{}_TradingLog.csv".format(account_name)
trading_call_back = 1000 # --- change ----# number of past transactions to fetch
def get_time():
named_tuple = time.localtime() # get struct_time
Time = time.strftime("%m/%d/%Y, %H:%M:%S", named_tuple)
return Time
def get_wallet_details():
wallet = exchange.privateGetWalletBalances()['result']
return wallet
def get_minimum_size():
minimum_size = float(exchange.fetch_ticker(pair)['info']['minProvideSize'])
return minimum_size
def checkDB():
try:
tradinglog = pd.read_csv("{}_tradinglog.csv".format(account_name))
print('DataBase Exist Loading DataBase....')
except:
tradinglog = pd.DataFrame(columns=['id', 'timestamp', 'date','time', 'pair', 'side', 'price', 'qty', 'cost', 'fee', 'liquidity', 'bot_name', 'subaccount'])
tradinglog.to_csv("{}_tradinglog.csv".format(account_name),index=False)
print("Database Created")
return tradinglog
def get_trade_history(pair):
pair = pair
trade_history = pd.DataFrame(exchange.fetchMyTrades(pair, limit = trading_call_back),
columns=['id', 'timestamp', 'timestamp','datetime', 'symbol', 'side', 'price', 'amount', 'cost', 'fee', 'takerOrMaker'])
cost=[]
for i in range(len(trade_history)):
fee = trade_history['fee'].iloc[i]['cost'] if trade_history['fee'].iloc[i]['currency'] == 'USD' else trade_history['fee'].iloc[i]['cost'] * trade_history['price'].iloc[i]
cost.append(fee) # ใน fee เอาแค่ cost
trade_history['fee'] = cost
return trade_history
def get_last_id(pair):
pair = pair
trade_history = get_trade_history(pair)
last_trade_id = (trade_history.iloc[:trading_call_back]['id'])
return last_trade_id
def update_trade_log():
checkDB()
tradinglog = pd.read_csv("{}_tradinglog.csv".format(account_name))
last_trade_id = get_last_id(pair)
trade_history = get_trade_history(pair)
for i in last_trade_id:
tradinglog = pd.read_csv("{}_tradinglog.csv".format(account_name))
trade_history = get_trade_history(pair)
#print(trade_history)
if int(i) not in tradinglog.values:
print(i not in tradinglog.values)
last_trade = trade_history.loc[trade_history['id'] == i]
list_last_trade = last_trade.values.tolist()[0]
            # convert the date/time in the record
d = datetime.strptime(list_last_trade[3], "%Y-%m-%dT%H:%M:%S.%fZ")
d = pytz.timezone('Etc/GMT+7').localize(d)
d = d.astimezone(pytz.utc)
Date = d.strftime("%Y-%m-%d")
Time = d.strftime("%H:%M:%S")
            # edit & append the data before adding it to the database
list_last_trade[2] = Date
list_last_trade[3] = Time
list_last_trade.append(account_name)
list_last_trade.append(subaccount)
with open("{}_tradinglog.csv".format(account_name), "a+", newline='') as fp:
wr = csv.writer(fp, dialect='excel')
wr.writerow(list_last_trade)
print('Recording Trade ID : {}'.format(i))
else:
print('Trade Already record')
print("Calculating.")
# Create Buy and Sell dataframe separately
def Buy_Sell_Dataframe(trade_history , pair):
print("Calculating...")
min_trade_size = get_minimum_size()
trade_history_buy = pd.DataFrame(trade_history)
trade_history_sell = | pd.DataFrame(trade_history) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 14:25:57 2019
@author: skoebric
"""
"""
TODO:
- confirm how net metering is read in
- create agent csv with data we already have
- move agent creation into dgen_model based on params in config?
"""
# --- Python Battery Imports ---
import os
import itertools
import json
from distutils.dir_util import copy_tree
# --- External Library Imports ---
import pandas as pd
import geopandas as gpd
import numpy as np
from shapely.ops import nearest_points
from shapely.geometry import Point, shape
import shapely
# --- Module Imports ---
import agent_config as config
import helper
import agent_sampling as samp
pd.options.mode.chained_assignment = None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~ Functions ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# --- read lookups ---
state_id_lookup = pd.read_csv(os.path.join('reference_data', 'india_census','state_id_lookup.csv'))
state_id_lookup = dict(zip(state_id_lookup['state_name'], state_id_lookup['state_id']))
def wholesale_rates(agent_df):
"""
Net Billing avoided cost. Creates 'wholesale_elec_usd_per_kwh'.
Used if 'compensation_style' == 'Net Billing (Wholesale)'
Columns
-------
state_id (int) : lookup from census data
2014-2050 (float) : annual value
"""
reeds = pd.read_csv(os.path.join('reference_data','reeds_output_margcost_state.csv'))
reeds.columns = ['state_name','year','scenario','variable','cost']
reeds = reeds.loc[reeds['scenario'] == 'Base']
reeds = reeds.loc[reeds['variable'] == 'mc.total']
# --- pivot to wide ---
wholesale_rates = reeds.pivot_table(index=['state_name'], columns=['year'], values='cost')
wholesale_rates = wholesale_rates.reset_index(drop=False)
wholesale_rates['state_name'] = wholesale_rates['state_name'].replace('delhi', 'nct_of_delhi')
wholesale_rates.loc[wholesale_rates['state_name'] != 'telangana']
# --- add in earlier years ---
annual_diff = (wholesale_rates[2018] - wholesale_rates[2017]) / wholesale_rates[2018]
for y_index, y in enumerate([2014,2015,2016]):
wholesale_rates[y] = wholesale_rates[2017] * (1 - annual_diff)**(3-y_index)
# --- add in later years ---
annual_diff = (wholesale_rates[2047] - wholesale_rates[2046]) / wholesale_rates[2047]
for y_index, y in enumerate([2048,2049,2050]):
wholesale_rates[y] = wholesale_rates[2047] * (1 + annual_diff)**(y_index + 1)
# --- fuzzy string matching ---
clean_list = list(agent_df['state_name'].unique())
wholesale_rates['state_name'] = wholesale_rates['state_name'].apply(helper.sanitize_string)
wholesale_rates['state_name'] = helper.fuzzy_address_matcher(wholesale_rates['state_name'], clean_list)
# --- any missing states ---
avg_wholesale_rates = wholesale_rates[list(range(2014,2051))].mean()
for state in clean_list:
if state not in set(wholesale_rates['state_name']):
state_wholesale_rates = avg_wholesale_rates.copy()
state_wholesale_rates['state_name'] = state
wholesale_rates = wholesale_rates.append(state_wholesale_rates, ignore_index=True)
# --- drop any duplicates ---
wholesale_rates = wholesale_rates.drop_duplicates(subset=['state_name'])
# --- map state id ---
wholesale_rates['state_id'] = wholesale_rates['state_name'].map(state_id_lookup)
wholesale_rates.drop(['state_name'], axis='columns', inplace=True)
# --- currency conversion ---
wholesale_rates[list(range(2014,2051))] = wholesale_rates[list(range(2014,2051))] / config.RUPPES_TO_USD
# --- reorder columns ---
wholesale_rates = wholesale_rates[['state_id'] + list(range(2014,2051))]
wholesale_rates.to_csv(os.path.join('india_base','wholesale_rates.csv'), index=False)
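# Resulting layout (derived from the code above): the CSV has one row per state_id and
# one column per year (2014-2050); values are the ReEDS marginal costs divided by
# config.RUPPES_TO_USD, i.e. expressed in USD per kWh as described in the docstring.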
def financing_rates(agent_df):
"""
Create .csv with discount rates by sector/geography, scaled by a social indicator score.
Columns
-------
state_id (int) : integer representation of state
sector_abbr (str) : the sector of the agent
loan_rate (float) : the annual interest rate on a loan.
real_discount (float) : the discount rate of the state/sector in percent
down_payment (float) : percent of downpayment towards system (typically 0.2 to compare apples to apples in WACC)
"""
# --- Take dict of controlregions by social indicator ---
state_social_df = agent_df[['state_id','social_indicator']].drop_duplicates()
# --- Permute by sector ---
social_dfs = []
for s in ['res','com','ind']:
_state_social_df = state_social_df.copy()
_state_social_df['sector_abbr'] = s
social_dfs.append(_state_social_df)
finance_df = pd.concat(social_dfs, axis='rows')
finance_df = finance_df.reset_index(drop=True)
social_max = finance_df['social_indicator'].max()
social_min = finance_df['social_indicator'].min()
    # TODO: this will break with a division by zero when social_max equals social_min (all agents share the same social indicator)
# --- inverse normalization of discount rate (i.e. lower social indicator has higher discount rate) ---
finance_df.loc[finance_df['sector_abbr'] == 'res', 'discount_rate'] = ((config.RES_MAX_DR - config.RES_MIN_DR)/(social_max - social_min)) * (social_max - finance_df['social_indicator']) + config.RES_MIN_DR
finance_df.loc[finance_df['sector_abbr'] == 'com', 'discount_rate'] = ((config.COM_MAX_DR - config.COM_MIN_DR)/(social_max - social_min)) * (social_max - finance_df['social_indicator']) + config.COM_MIN_DR
finance_df.loc[finance_df['sector_abbr'] == 'ind', 'discount_rate'] = ((config.IND_MAX_DR - config.IND_MIN_DR)/(social_max - social_min)) * (social_max - finance_df['social_indicator']) + config.IND_MIN_DR
# --- inverse normalization of loan rate(i.e. lower social indicator has higher loan rate ---
finance_df.loc[finance_df['sector_abbr'] == 'res', 'loan_rate'] = ((config.RES_MAX_LR - config.RES_MIN_LR)/(social_max - social_min)) * (finance_df['social_indicator'] - social_max) + config.RES_MIN_LR
finance_df.loc[finance_df['sector_abbr'] == 'com', 'loan_rate'] = ((config.COM_MAX_LR - config.COM_MIN_LR)/(social_max - social_min)) * (finance_df['social_indicator'] - social_max) + config.COM_MIN_LR
finance_df.loc[finance_df['sector_abbr'] == 'ind', 'loan_rate'] = ((config.IND_MAX_LR - config.IND_MIN_LR)/(social_max - social_min)) * (finance_df['social_indicator'] - social_max) + config.IND_MIN_LR
# --- normalization of down payment (i.e. lower social indicator has lower down payment ---
finance_df.loc[finance_df['sector_abbr'] == 'res', 'down_payment'] = ((config.RES_MAX_DP - config.RES_MIN_DP)/(social_max - social_min)) * (finance_df['social_indicator'] - social_max) + config.RES_MIN_DP
finance_df.loc[finance_df['sector_abbr'] == 'com', 'down_payment'] = ((config.COM_MAX_DP - config.COM_MIN_DP)/(social_max - social_min)) * (finance_df['social_indicator'] - social_max) + config.COM_MIN_DP
finance_df.loc[finance_df['sector_abbr'] == 'ind', 'down_payment'] = ((config.IND_MAX_DP - config.IND_MIN_DP)/(social_max - social_min)) * (finance_df['social_indicator'] - social_max) + config.IND_MIN_DP
# --- Write to csv ---
finance_df.to_csv(os.path.join('india_base','financing_rates.csv'), index=False)
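# Worked example of the inverse normalization above (illustrative numbers only): with
# RES_MIN_DR/RES_MAX_DR as the residential discount-rate bounds, an agent whose
# social_indicator equals social_min gets RES_MAX_DR, one at social_max gets RES_MIN_DR,
# and anything in between is linearly interpolated; e.g. for bounds of 0.05-0.15 and a
# social indicator halfway up the range, the discount rate is 0.05 + 0.5 * (0.15 - 0.05) = 0.10.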
def load_growth(agent_df):
"""
Create csv with annual load growth pct by geography.
Columns
-------
scenario (str) : matches the string from the input sheet
year (int) : year of load growth relative to 2014
sector_abbr (str) : the sector of the agent
load_multiplier (float) : load growth relative to 2014
Methodology
-----------
Take ReEDS Load Time Slice Hourly Load by State, average by year
Assumptions
-----------
Currently assumes that all sectors have the same load growth. Could use 'CEA_historic_consumption_by_sector.csv' to normalize this by sector.
"""
reeds = pd.read_excel(
os.path.join('reference_data','NREL_2020_load_forecast_2016-2037_KA.xlsx'),
sheet_name='Growth_NREL_Baseline')
reeds = reeds.loc[reeds['metric'] == 'Annual Energy']
reeds.drop('metric', inplace=True, axis='columns')
reeds.set_index('BA', inplace=True)
# --- scale back load before 2019 ---
reeds_before = reeds[[2017,2018]]
for y in [2014,2015,2016]: # add in previous years
reeds_before[y] = np.nan
reeds_before = reeds_before[[2014,2015,2016,2017,2018]]
reeds_before = reeds_before.fillna(method='bfill', axis=1).fillna(method='ffill', axis=1)
reeds_before = 1 - reeds_before #load expressed as multiplier from next year
reeds_before = reeds_before[list(reeds_before.columns)[::-1]] #reverse list
reeds_before = reeds_before.cumprod(axis=1)
reeds_before = reeds_before[list(reeds_before.columns)[::-1]] #reverse back to chronological
reeds_before[2019] = 1 #2019 EPS is baseline year
reeds_after = reeds[[2021,2022,2023,2024,2025,2026,2031,2036]]
missing_years = [2020]+list(range(2027,2031))+list(range(2032,2036))+list(range(2037,2051))
for y in missing_years:
reeds_after[y] = np.nan
reeds_after += 1 #express as percent increase from previous year
reverse_cagr = lambda x: (x)**(1/5) #convert 5 year compund growth rate to annual
reeds_after[2031] = reeds_after[2031].apply(reverse_cagr)
reeds_after[2036] = reeds_after[2036].apply(reverse_cagr)
reeds_after = reeds_after[list(range(2020,2051))]
reeds_after = reeds_after.fillna(method='bfill', axis=1).fillna(method='ffill', axis=1)
reeds_after = reeds_after.cumprod(axis=1)
load_growth = pd.concat([reeds_before, reeds_after], axis='columns')
load_growth.index.name = 'state_name'
load_growth.reset_index(drop=False, inplace=True)
load_growth = load_growth.melt(
id_vars=['state_name'],
var_name='year',
value_name='load_multiplier'
)
# --- fuzzy string matching ---
clean_list = list(agent_df['state_name'].unique())
load_growth['state_name'] = load_growth['state_name'].apply(helper.sanitize_string)
load_growth['state_name'] = helper.fuzzy_address_matcher(load_growth['state_name'], clean_list)
# --- any missing states ---
avg_load_growth = load_growth.groupby(['year'], as_index=False)['load_multiplier'].mean()
for state in clean_list:
if state not in set(load_growth['state_name']):
state_load_growth = avg_load_growth.copy()
state_load_growth['state_name'] = state
load_growth = load_growth.append(state_load_growth)
# --- map state id ---
load_growth['state_id'] = load_growth['state_name'].map(state_id_lookup)
load_growth.drop(['state_name'], axis='columns', inplace=True)
# --- duplicate for sectors ---
load_growths = []
for s in ['res','com','ind','agg']:
df = load_growth.copy()
df['sector_abbr'] = s
load_growths.append(df)
load_growth = pd.concat(load_growths, axis='rows')
load_growth['scenario'] = 'Planning'
load_growth = load_growth.drop_duplicates(subset=['state_id','sector_abbr','year'])
load_growth.to_csv(os.path.join('india_base','load_growth_projections.csv'), index=False)
def nem_settings(agent_df):
"""
Create nem_settings.csv based on config variables.
Columns
-------
sector_abbr (str) : the sector of the agent
year (int) : year for policy details
nem_system_size_limit_kw (int) : size limit for individual agent system size (kW)
year_end_excess_sell_rate_usd_per_kwh (float) : payment for excess genration at end of year TODO how is this used?
"""
# --- Create Dataframe from permutations, as values are all similar ---
nem_df = pd.DataFrame().from_records(itertools.product(['res','com','ind'], list(set(agent_df['state_id'])), range(2015,2051)))
nem_df.columns = ['sector_abbr','state_id','year']
nem_df.loc[nem_df['sector_abbr']=='res', 'nem_system_size_limit_kw'] = config.RES_NEM_KW_LIMIT
nem_df.loc[nem_df['sector_abbr']=='com', 'nem_system_size_limit_kw'] = config.COM_NEM_KW_LIMIT
nem_df.loc[nem_df['sector_abbr']=='ind', 'nem_system_size_limit_kw'] = config.IND_NEM_KW_LIMIT
# --- Define Compensation Style for each State ---
nem_df['compensation_style'] = 'Net Metering'
nem_df.to_csv(os.path.join('india_base','nem_settings.csv'), index = False)
def rate_escalations(agent_df):
"""
Create rate_escalations.csv based on compound increase of config values.
Columns
-------
source (str) : rate growth planning scenario, from input sheet
state_id (int) : integer representation of state
sector_abbr (str) : the sector of the agent
year (int) : year of rate escalation relative to 2014
escalation_factor (float) : multiplier of rate escalation relative to 2014
"""
# --- Create Dataframe from permutations, as values are all similar ---
rate_esc_df = | pd.DataFrame() | pandas.DataFrame |
import io
import copy
import os
from os.path import join as opj
from PIL import Image
from sqlalchemy import create_engine
import matplotlib.pylab as plt
from matplotlib import patches
from matplotlib.colors import ListedColormap
from pandas import read_sql_query
from pandas import DataFrame, concat, Series
import numpy as np
from PIL import ImageDraw
from collections import Counter
from typing import Tuple, List, Dict, Any
from histomicstk.annotations_and_masks.annotation_and_mask_utils import \
np_vec_no_jit_iou, get_scale_factor_and_appendStr, \
get_image_from_htk_response
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.preprocessing import label_binarize
from copy import deepcopy
from configs.nucleus_style_defaults import Interrater as ir, \
DefaultAnnotationStyles, NucleusCategories # noqa
from interrater.constrained_agglomerative_clustering import \
ConstrainedAgglomerativeClustering # noqa
from GeneralUtils import calculate_4x4_statistics
def _maybe_mkdir(folder):
os.makedirs(folder, exist_ok=True)
def _get_color_from_rgbstr(rgbstr):
return [int(j) / 255 for j in rgbstr[4:-1].split(',')]
def _add_fovtype_to_eval_fov(fovinfo, dbcon):
"""Add fov type to fovinfo dict."""
# any subset and user will work
evname = list(fovinfo.keys())[0]
userfov = fovinfo[evname][list(fovinfo[evname].keys())[0]]
contdf = read_sql_query(f"""
SELECT "element_girder_id", "group",
"xmin", "ymin", "xmax", "ymax"
FROM "annotation_elements"
WHERE "fov_id" = {userfov['fov_id']}
;""", dbcon)
# get fov group and add
grps = list(contdf.loc[:, "group"])
fovinfo['fov_type'] = [j for j in grps if "fov" in j.lower()][0]
return fovinfo
def get_fovinfos_for_interrater(dbcon):
"""Get fov ids for inter-rater analysis.
Get dict which is indexed by the FOV name and contains five entries
(EVAL_SET_1, EVAL_SET_2, EVAL_SET_3, EVAL_SET_3), containing the
information about the annotations of THIS fov under different conditions:
- U-control: the observers place bounding boxes around all nuclei
and they are not biased by any pre-existing boundaries.
- B-control: Existing nucleus bounds are obtained using "traditional"
image processing algorithms (smoothing, thresholding, connected
components etc). These are then corrected by the participants
the study's protocol (dots inside correct polygon bounds having
and bounding boxes around nuclei without correct bounds.
- E: Main evaluation set. This is the same as B-control but the bounds
are obtained from traditional methods are used to train
mask-RCNN to obtain more accurate bounds and labels. This is
also the method that is used for obtaining the core set.
- BT-control: Same as E but instead of obtaining the labels to the
nuclei used to train the mask-RCNN by assigning them the same
label as the region labels manually annotated, here the region
labels are obtained by quickly dialing knobs in the HSI space.
- fov_type: See Roche patent. This has to do with how the FOV location
itself was obtained.
Each entry is also a dict indexed by the name of the observer and has the
fov_id for the annotations by that user in that fov location.
"""
fovinfos = dict()
for evalset in ir.EVALSET_NAMES:
# alias = ir.REVERSE_ANNSETS[evalset]['alias']
alias = evalset
        # find fovs for this evaluation set
fovs_thisset = read_sql_query(f"""
SELECT "fov_id", "fovname", "QC_label", "slide_name",
"sf", "maybe-xmin", "maybe-ymin", "maybe-xmax", "maybe-ymax",
"XMIN", "YMIN"
FROM "fov_meta"
WHERE "slide_id" IN (
SELECT DISTINCT "itemId"
FROM "annotation_docs"
WHERE "subset" = "{evalset}"
)
;""", dbcon)
for _, fov in fovs_thisset.iterrows():
fov = fov.to_dict()
# don't include FOVs that are not done
if fov['QC_label'] != 'COMPLETE':
continue
# Note that we CANNOT use the locations in the fov name
# as these are slightly expanded beyond the FOV boundary to include
# overflowing annotations. Instead we use the fov locations from
# fov meta. Alternatively, we could have read the FOV contours csv
# file and mapped it to slide coordinates
observer, _, tmp = fov['fovname'].split('_#_')
sldname, tmp = tmp.split("_id")
standard_fovname = "%s_left-%d_top-%d_bottom-%d_right-%d" % (
sldname, fov['maybe-xmin'], fov['maybe-ymin'],
fov['maybe-ymax'], fov['maybe-xmax'],
)
if standard_fovname not in fovinfos.keys():
fovinfos[standard_fovname] = {alias: dict()}
elif alias not in fovinfos[standard_fovname]:
fovinfos[standard_fovname][alias] = dict()
# assign
fovinfos[standard_fovname][alias][observer] = fov
for _, fovinfo in fovinfos.items():
_add_fovtype_to_eval_fov(fovinfo, dbcon=dbcon)
return fovinfos
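# Shape of the returned structure (illustrative sketch; actual keys and values depend on
# the database contents, and 'fov_basic' is a hypothetical FOV group name):
#
#   fovinfos = {
#       'SLIDE-XYZ_left-100_top-200_bottom-500_right-600': {
#           'E': {'observer_1': {...fov_meta row...}, 'observer_2': {...}},
#           'U-control': {'observer_1': {...fov_meta row...}},
#           'fov_type': 'fov_basic',
#       },
#       ...
#   }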
def _convert_coordstr_to_absolute(coordstr: str, sf: float, minc: int) -> str:
return ','.join([
str(int((int(c) / sf) + minc)) for c in coordstr.split(',')])
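# Example of the string transform above (illustrative numbers): with sf = 0.5 and
# minc = 100, the coordinate string '10,20,30' becomes '120,140,160'
# (each value is divided by sf and offset by minc).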
def _modify_bbox_coords_to_base_mag(contdf, userfov):
"""Modify bbox coordinates to be at base slide magnification."""
# bounding box
for locstr in ['xmin', 'xmax']:
contdf.loc[:, locstr] = np.int32(
contdf.loc[:, locstr] / userfov['sf'] + userfov['XMIN'])
for locstr in ['ymin', 'ymax']:
contdf.loc[:, locstr] = np.int32(
contdf.loc[:, locstr] / userfov['sf'] + userfov['YMIN'])
# actual coordinates
for c in ('x', 'y'):
contdf.loc[:, f'coords_{c}'] = contdf.loc[:, f'coords_{c}'].apply(
lambda cs: _convert_coordstr_to_absolute(
cs, sf=userfov['sf'], minc=userfov[f'{c.upper()}MIN']))
return contdf
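# The transform above maps FOV-relative coordinates back to base (scan) magnification:
# absolute = relative / sf + offset. E.g. with sf = 0.5 and XMIN = 10000, a relative
# xmin of 250 maps to 250 / 0.5 + 10000 = 10500 (illustrative numbers).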
def _get_all_contours_for_eval_fov(fovinfo, dbcon, max_bbox_side=100):
"""Get all contours for this FOV from various users."""
all_conts = DataFrame()
fov_conts = DataFrame()
for evalset in ir.EVALSET_NAMES:
if evalset not in fovinfo.keys():
continue
finfo = fovinfo[evalset]
for user, userfov in finfo.items():
contdf = read_sql_query(f"""
SELECT "element_girder_id", "group",
"xmin", "ymin", "xmax", "ymax", "coords_x", "coords_y"
FROM "annotation_elements"
WHERE "fov_id" = {userfov['fov_id']}
;""", dbcon)
# make sure its at base magnification and has no offset
contdf = _modify_bbox_coords_to_base_mag(
contdf=contdf, userfov=userfov)
contdf.loc[:, "user"] = user
contdf.loc[:, "evalset"] = evalset
# get fov group and remove from dataframe
grps = list(contdf.loc[:, "group"])
fovels = [
(i, j) for i, j in enumerate(grps) if "fov" in j.lower()]
fovcont = contdf.loc[[j[0] for j in fovels], :]
for elid, _ in fovels:
contdf.drop(elid, axis=0, inplace=True)
contdf.loc[:, "fov_type"] = fovels[0][1]
# now we add to main dict
all_conts = concat(
(all_conts, contdf), axis=0, sort=False, ignore_index=True)
fov_conts = concat(
(fov_conts, fovcont), axis=0, sort=False, ignore_index=True)
all_conts.index = all_conts.loc[:, "element_girder_id"]
fov_conts.index = fov_conts.loc[:, "element_girder_id"]
# get rid of "nuclei" which are really FOV annotations that were
# mistakenly clicked by the observer and changed to a non-fov label
right_width = (
all_conts.loc[:, "xmax"] - all_conts.loc[:, "xmin"]) < max_bbox_side
right_height = (
all_conts.loc[:, "ymax"] - all_conts.loc[:, "ymin"]) < max_bbox_side
all_conts = all_conts.loc[right_width, :]
all_conts = all_conts.loc[right_height, :]
return all_conts, fov_conts
def _get_iou_for_fov(all_conts):
"""Get ious for all bboxes/bounds in fov.
The target of the code below is to get a matrix comparing
the IOU or each potential nucleus with all other potential nuclei.
Note that a "potential" nucleus is defined as a bounding
box created by one of the observers OR the bounding box of a polygon
"approved" by one of the observers. We use the bounding box of approved
polygons as opposed to the polygons themselves for two reasons:
1. To avoid bias caused by artificially low IOU values when a polygon
       is used versus a bounding box. We don't want an apples-to-oranges
       comparison!
2. Efficiency! Comparing bounding boxes is a vectorized operation that
can be done for all bboxes in an FOV at once.
"""
# get iou of bounding boxes
sliced = np.array(
all_conts.loc[:, ["xmin", "ymin", "xmax", "ymax"]], dtype=int)
iou = np_vec_no_jit_iou(bboxes1=sliced, bboxes2=sliced)
iou = DataFrame(iou, index=all_conts.index, columns=all_conts.index)
return iou
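# Reminder of what the matrix holds (illustrative): for two boxes
# (xmin, ymin, xmax, ymax) = (0, 0, 10, 10) and (5, 0, 15, 10), the standard
# intersection-over-union is 50 / (100 + 100 - 50) = 1/3 (exact values may differ
# slightly depending on the +1-pixel convention used inside np_vec_no_jit_iou).
# The result is a symmetric DataFrame indexed by girder element id on both axes.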
def _get_clusters_for_fov(all_conts, iou, min_iou=0.5, constrained=True):
"""Agglomerative clustering of bounding boxes.
    Parameters
    ----------
    all_conts : DataFrame
        all annotation elements (bounding boxes) in the FOV
    iou : DataFrame
        pairwise IOU matrix for the elements in all_conts
    min_iou : float
        minimum IOU for two elements to be linked into the same cluster
    constrained : bool
        if True, annotations from the same user cannot map to the same
        cluster (anchor)
    Returns
    -------
    ConstrainedAgglomerativeClustering
        the fitted clustering model
"""
# if constrained, find annotation indices that cannot appear in the
# same cluster. In this case, annotations from the same user (and FOV)
# CANNOT map to the same anchor since the user's intention, by definition,
# is to annotate two separate nuclei
dontlink = _get_annlocs_for_same_user(all_conts) if constrained else None
# Hierarchical agglomerative clustering
model = ConstrainedAgglomerativeClustering(
linkage_thresh=1 - min_iou,
linkage='complete', affinity='precomputed',
dontlink=dontlink,
)
# now we fit the model
model.run(cost=1 - np.array(iou.values, dtype=float))
return model
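# Typical call chain (sketch only; argument values are illustrative):
#
#   all_conts, fov_conts = _get_all_contours_for_eval_fov(fovinfo, dbcon)
#   iou = _get_iou_for_fov(all_conts)
#   model = _get_clusters_for_fov(all_conts, iou, min_iou=0.5, constrained=True)
#
# The fitted model's cluster assignments are then used downstream to derive one
# "anchor" (true nucleus location) per cluster.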
def _get_relative_anchors(cluster_medoids, bounds):
"""Get medoid coords relative to fetched RGB."""
relative_medoids = cluster_medoids.copy()
relative_medoids.loc[:, "xmin"] -= bounds["XMIN"]
relative_medoids.loc[:, "ymin"] -= bounds["YMIN"]
relative_medoids.loc[:, "xmax"] -= bounds["XMIN"]
relative_medoids.loc[:, "ymax"] -= bounds["YMIN"]
relative_medoids = relative_medoids * bounds['sf']
return relative_medoids.astype('int')
def _get_coords_from_coordstr(
coordstr_x: str, coordstr_y: str) -> List[List[int]]:
return [
[int(j) for j in xy] for xy in
zip(coordstr_x.split(','), coordstr_y.split(','))
]
def create_mask_from_coords(
coords, min_x=None, min_y=None, max_x=None, max_y=None):
"""Create a binary mask from given vertices coordinates.
Source: This is modified from code by <NAME> from David Gutman Lab.
This version is modified from histomicstk.annotation_and_mask_utils
Parameters
-----------
coords : np arrray
must be in the form (e.g. ([x1,y1],[x2,y2],[x3,y3],.....,[xn,yn])),
where xn and yn corresponds to the nth vertix coordinate.
Returns
--------
np array
binary mask
"""
polygon = coords.copy()
if any([j is None for j in [min_x, min_y, max_x, max_y]]):
# use the smallest bounding region, calculated from vertices
min_x, min_y = np.min(polygon, axis=0)
max_x, max_y = np.max(polygon, axis=0)
# get the new width and height
width = int(max_x - min_x)
height = int(max_y - min_y)
# shift all vertices to account for location of the smallest bounding box
polygon[:, 0] = polygon[:, 0] - min_x
polygon[:, 1] = polygon[:, 1] - min_y
# convert to tuple form for masking function (nd array does not work)
vertices_tuple = tuple(map(tuple, polygon))
# make the mask
img = Image.new('L', (width, height), 0)
ImageDraw.Draw(img).polygon(vertices_tuple, outline=1, fill=1)
return np.array(img, dtype='int32')
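# Example (illustrative): a right triangle with vertices (1, 1), (5, 1) and (1, 5)
#
#   coords = np.array([[1, 1], [5, 1], [1, 5]])
#   mask = create_mask_from_coords(coords)   # 4 x 4 binary mask of the triangle
#
# Passing min_x/min_y/max_x/max_y instead places the polygon inside a mask of a
# fixed, caller-chosen size (e.g. the FOV bounding box).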
def _get_anchor_from_single_cluster(
relevant_contours, cluster_iou, usrs,
manualusr='Ehab_Hafiz', manualusrgrp='SPs'):
"""Get the true nucleus location (aka "anchor") for a single cluster.
Parameters
----------
relevant_contours: DataFrame
cluster_iou: DataFrame
iou of elements that mapped to that cluster
usrs: dict
indexed by eval set name. Each entry is a dict, where keys
are the names of participants, and the values are either
"undetected" or "DidNotAnnotateFOV". This is used to initialize
the anchor dict and to differentiate between those who annotated
the FOV, and those who annotated it but did not detect the nucleus.
manualusr: str
Returns
-------
"""
manualusr = ir.PARTICIPANT_ALIASES[manualusr]
def _get_anninfo_for_subgroup(evalset):
"""Get the info about annotations by participants in an evalset."""
evdf = relevant_contours.loc[
relevant_contours.loc[:, 'evalset'] == evalset, :]
ev_anninfo = {
who: {'annids': [], 'labels': [], 'users': [], 'polylines': []}
for who in ir.who.keys()}
for annid, row in evdf.iterrows():
# Add label from user. Note that if we did not do constrained
# clustering, and we ended up with more than one annotation
# from the same user, this would simply overwrite the label
# BUT, no worries, the count will be increased and the grder ID
# will still be saved to the matches so that we can later show
# the effect of constraining
usrs[evalset][row['user']] = row['group']
# Add girder ID and label from user to each relevant group
for who, ppl in ir.who.items():
if row['user'] in ppl:
ev_anninfo[who]['annids'].append(annid)
ev_anninfo[who]['labels'].append(row['group'])
ev_anninfo[who]['users'].append(row['user'])
if (evalset != 'U-control') and (
row['type'] == 'polyline'):
ev_anninfo[who]['polylines'].append(row['user'])
return ev_anninfo
def _get_medoid_using_iou(gids, justid=False):
"""Get medoid of a bunch of annotations using their girder id.
NOTE: cluster "medoid" is defined as the element with maximum mean iou
with all other elements in the cluster. In other words, it is
most representative of this cluster. Note that the label for
the medoid is irrelevant.
"""
iousubset = cluster_iou.loc[gids, :]
contsubset = relevant_contours.loc[gids, :]
medoid_id = iousubset.index[np.argmax(np.array(iousubset.mean(1)))]
if justid:
return medoid_id
else:
md = contsubset.loc[medoid_id, ['xmin', 'ymin', 'xmax', 'ymax']]
return dict(md)
def _get_anchor_bbox(anninfo):
"""Get the cluster anchor (i.e. representative element). Here's the
order of preference:
1- If an unbiased manual segmentation boundary exists (by <NAME>),
then we save it and use it for the anchor bounding box limits.
2- We use pathologist annotations from the unbiased control set,
if they exists in the cluster, to get the "proper" anchor location.
3- We use NP annotations from the unbiased control set.
4- We uss the medoid of all matched elements, regardless of set
"""
# init anchor bbox location
anchorbbox = {k: np.nan for k in ('xmin', 'ymin', 'xmax', 'ymax')}
        has_manual_boundary = manualusr in \
            anninfo['U-control'][manualusrgrp]['users']
manual_boundary = None
unbiased_pids = anninfo['U-control']['Ps']['annids']
unbiased_npids = anninfo['U-control']['NPs']['annids']
# First preference: Use manual boundary
        if has_manual_boundary:
rows = relevant_contours.loc[anninfo['U-control'][manualusrgrp][
'annids'], :]
row = rows.loc[rows.loc[:, 'user'] == manualusr, :].iloc[0, :]
manual_boundary = {
'coords_x': row['coords_x'], 'coords_y': row['coords_y']}
# bbox of manual boundary take precedence
anchorbbox.update({k: row[k] for k in (anchorbbox.keys())})
# Second preference: use pathologist annotations from the unbiased set
elif len(unbiased_pids) > 0:
anchorbbox.update(_get_medoid_using_iou(unbiased_pids))
# Third preference: use NP annotations from the unbiased set
elif len(unbiased_npids) > 0:
anchorbbox.update(_get_medoid_using_iou(unbiased_npids))
# Last preference: use all annotations
else:
anchorbbox.update(_get_medoid_using_iou(relevant_contours.index))
return anchorbbox, manual_boundary
def _get_MV_inferred_label_for_who(evalset, who):
labels = anninfo[evalset][who]['labels']
if len(labels) > 0:
return Counter(labels).most_common()[0]
return np.nan, np.nan
def _get_algorithmic_boundary_for_evalset(evalset):
"""Get algorithmic boundary for nucleus.
Remember that each eval set has its own algorithmic boundary,
except of course the unbiased control set, which has none.
"""
polylines = relevant_contours.loc[relevant_contours.loc[
:, 'type'] == 'polyline', :]
polylines = polylines.loc[polylines.loc[:, 'evalset'] == evalset, :]
# unbiased controls don't have algorithmic boundaries
if (polylines.shape[0] < 1) or (evalset == 'U-control'):
return {
f'algorithmic_coords_{c}': np.nan for c in ('x', 'y')}
# find the medoid polyline. Note that even though everyone is presumed
        # to have clicked the same algorithmic boundary, it is possible that
# multiple clicked algorithmic boundaries matched to the same cluster
mid = _get_medoid_using_iou(polylines.index, justid=True)
return {
'algorithmic_coords_x': polylines.loc[mid, 'coords_x'],
'algorithmic_coords_y': polylines.loc[mid, 'coords_y'],
}
def _get_anchor_dict(evalset):
"""Make sure all fields exist."""
anchor_id = ",".join([
str(anchorbbox[k]) for k in ('xmin', 'ymin', 'xmax', 'ymax')])
anch = {
'anchor_id': anchor_id,
'fovname': np.nan,
'min_iou': np.nan,
'evalset': evalset,
}
anch.update(anchorbbox)
anch.update({f'{loc}_relative': np.nan for loc in anchorbbox.keys()})
anch.update({
'has_manual_boundary': manual_boundary is not None,
'has_algorithmic_boundary': any([
len(anninfo[evalset][who]['polylines']) > 0
for who in ir.who.keys()]),
'algorithmic_vs_manual_intersect': np.nan,
'algorithmic_vs_manual_sums': np.nan,
'algorithmic_vs_manual_IOU': np.nan,
'algorithmic_vs_manual_DICE': np.nan,
})
# no of matches per experience level
anch.update({
f'n_matches_{who}': float(len(anninfo[evalset][who]['annids']))
for who in ir.who.keys()})
anch.update({
f'UNBIASED_n_matches_{who}': np.nan
for who in ir.CONSENSUS_WHOS})
# tally and no of algorithmic boundary approvals per experience level
anch.update({
f'algorithmic_clicks_{who}':
",".join(anninfo[evalset][who]['polylines'])
for who in ir.who.keys()})
anch.update({
f'n_algorithmic_clicks_{who}': float(len(
anninfo[evalset][who]['polylines']))
for who in ir.who.keys()})
# consensus label per experience level
consensuses = dict()
for who in ir.CONSENSUS_WHOS: # ir.who.keys()
# majority voting
consensuses[f'MV_inferred_label_{who}'], \
consensuses[f'MV_inferred_label_count_{who}'] = \
_get_MV_inferred_label_for_who(evalset, who)
consensuses[f'UNBIASED_MV_inferred_label_{who}'] = np.nan
consensuses[f'UNBIASED_MV_inferred_label_count_{who}'] = np.nan
# expectation maximization
consensuses[f'EM_decision_boundary_is_correct_{who}'] = 0
consensuses[f'EM_inferred_label_{who}'] = np.nan
consensuses[f'EM_inferred_label_confidence_{who}'] = np.nan
consensuses[f'EM_inferred_label_count_{who}'] = np.nan
consensuses[f'UNBIASED_EM_inferred_label_{who}'] = np.nan
consensuses[f'UNBIASED_EM_inferred_label_confidence_{who}'] = np.nan # noqa
# number of Ps in THIS SET who agree with the unbiased label
consensuses[f'UNBIASED_EM_inferred_label_count_{who}'] = np.nan
# Soft EM probabilities for various classes (using THIS evalset)
consensuses.update({
f'EM_prob_{cls}_{who}': np.nan
for cls in ['undetected'] + ir.CLASSES
})
anch.update(consensuses)
# per-user and eval-set labels
anch.update({f'{usr}': usrs[evalset][usr] for usr in ir.All})
# for convenience, add manual boundary coordinates
if manual_boundary is not None:
anch.update({f'manual_{k}': v for k, v in manual_boundary.items()})
else:
anch.update({f'manual_coords_{c}': np.nan for c in ('x', 'y')})
# for convenience, add algorithmic boundary coordinates
anch.update(_get_algorithmic_boundary_for_evalset(evalset))
# add information about algorithmic boundary DICE stats
if anch['has_manual_boundary'] and anch['has_algorithmic_boundary']:
# get coords list
manual_coords = _get_coords_from_coordstr(
anch['manual_coords_x'], anch['manual_coords_y'])
algorithmic_coords = _get_coords_from_coordstr(
anch['algorithmic_coords_x'], anch['algorithmic_coords_y'])
# make sure sizes match
all_coords = np.array(manual_coords + algorithmic_coords)
bound = {}
bound['min_x'], bound['min_y'] = np.min(all_coords, axis=0)
bound['max_x'], bound['max_y'] = np.max(all_coords, axis=0)
# create masks
manual = create_mask_from_coords(np.int32(manual_coords), **bound)
algorithmic = create_mask_from_coords(
np.int32(algorithmic_coords), **bound)
# now get intersection, union, etc
intersect = np.sum((manual + algorithmic) == 2)
sums = np.sum(manual) + np.sum(algorithmic)
anch['algorithmic_vs_manual_intersect'] = intersect
anch['algorithmic_vs_manual_sums'] = sums
anch['algorithmic_vs_manual_IOU'] = intersect / (sums - intersect)
anch['algorithmic_vs_manual_DICE'] = 2. * intersect / sums
# for completeness, and to be able to access these later, we also
# save a comma-separated list of the girder IDs of annotations
# that matched to this medoid from this evaluation set
keep = relevant_contours.loc[:, 'evalset'] == evalset
anch['matches'] = ",".join(list(relevant_contours.loc[keep, :].index))
return anch
# Get user label tally, as well as for each experience level. This also
# updates the usr dict as an intentional side effect
anninfo = {
ev: _get_anninfo_for_subgroup(ev) for ev in ir.EVALSET_NAMES}
# Get the anchor bounding box
anchorbbox, manual_boundary = _get_anchor_bbox(anninfo)
# Initialize the anchor, making sure all fields are there
anchor = {
evalset: _get_anchor_dict(evalset)
for evalset in ir.EVALSET_NAMES}
return anchor
def _get_anchors_for_fov_by_clustering(
all_conts, iou, participants, min_iou=0.5, constrained=True):
"""Get nucleus anchors for FOV.
Parameters
----------
all_conts: DataFrame
iou: DataFrame
IOU for annotations against each other
participants: dict
each key is an eval set, and its value is a list of
participants who annotated this FOV
min_iou: float
min_iou for clustering (1 - linkage threshold)
constrained: bool
constrained clustering? i.e. prevent annotations by the same
participant from appearing in the same cluster?
Returns
-------
DataFrame
"""
# first we cluster
model = _get_clusters_for_fov(
all_conts=all_conts, iou=iou, min_iou=min_iou, constrained=constrained)
    # Differentiate between those who did not annotate the FOV and those who did
# this is an INITIALIZATION dict so a true copy must be passed
usrs = {
evalset: {
usr: 'undetected' if usr in ppl else 'DidNotAnnotateFOV'
for usr in ir.All
} for evalset, ppl in participants.items()
}
cluster_anchors = {evalset: [] for evalset in ir.EVALSET_NAMES}
# clid=4427; annlocs=model.clusters[clid]
for clid, annlocs in model.clusters.items():
relevant_idxs = list(iou.index[annlocs])
anchor = _get_anchor_from_single_cluster(
relevant_contours=all_conts.loc[relevant_idxs, :].copy(),
cluster_iou=iou.iloc[annlocs, annlocs].copy(),
usrs=copy.deepcopy(usrs),
)
for evalset in anchor.keys():
cluster_anchors[evalset].append(anchor[evalset])
# convert to dfs
for evalset in ir.EVALSET_NAMES:
cluster_anchors[evalset] = DataFrame.from_records(
cluster_anchors[evalset])
cluster_anchors[evalset].index = cluster_anchors[
evalset].loc[:, 'anchor_id']
cluster_anchors[evalset].loc[:, "min_iou"] = min_iou
return cluster_anchors
def _get_bounds_for_eval_fov(gc, dbcon, elementid, MPP, MAG, all_conts):
# any copy of the slide will do obviously
slide_id = read_sql_query(f"""
SELECT "itemId"
FROM "annotation_docs"
WHERE "annotation_girder_id" IN (
SELECT "annotation_girder_id"
FROM "annotation_elements"
WHERE "element_girder_id" = "{elementid}"
)
;""", dbcon).iloc[0, 0]
# calculate the scale factor
sf, appendStr = get_scale_factor_and_appendStr(
gc=gc, slide_id=slide_id, MPP=MPP, MAG=MAG)
# get overall bounds for FOV
bounds = {
'XMIN': np.min(all_conts.loc[:, "xmin"]),
'YMIN': np.min(all_conts.loc[:, "ymin"]),
'XMAX': np.max(all_conts.loc[:, "xmax"]),
'YMAX': np.max(all_conts.loc[:, "ymax"]),
'sf': sf,
'appendStr': appendStr,
}
return bounds, slide_id
def _get_rgb_for_interrater(gc, bounds, slide_id):
""""""
getStr = \
"/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d" \
% (slide_id,
bounds['XMIN'], bounds['XMAX'],
bounds['YMIN'], bounds['YMAX'])
getStr += bounds['appendStr']
resp = gc.get(getStr, jsonResp=False)
rgb = get_image_from_htk_response(resp)
return rgb
def _visualize_bboxes_on_rgb(
rgbim, xy_df, fovcont=None,
totalcount=None, lblcount=None, lblcolors=None,
bbox_linewidth=None, bbf=0.1, bbox_color='#525150',
add_points=False, point_size=None, psmin=4, psf=0.3,
point_color='#525150'):
fov = copy.deepcopy(fovcont)
    # flip vertically up front because matplotlib will flip the image again when rendering
rgb = np.flipud(rgbim.copy())
# make sure also y coords are flipped
for locstr in ("ymin", "ymax"):
xy_df.loc[:, locstr] = rgb.shape[0] - xy_df.loc[:, locstr] + 1
if fovcont is not None:
fov[locstr] = rgb.shape[0] - fovcont[locstr] + 1
if add_points:
fig = plt.figure()
dpi = 300
else:
fig = plt.figure(
figsize=(rgb.shape[1] / 1000, rgb.shape[0] / 1000), dpi=100)
dpi = 1000
ax = plt.subplot(111)
ax.imshow(rgb)
plt.axis('off')
ax = plt.gca()
ax.set_xlim(0.0, rgb.shape[1])
ax.set_ylim(0.0, rgb.shape[0])
# single versus multiple bounding box linewidths
if np.isscalar(bbox_linewidth):
lw = [bbox_linewidth] * xy_df.shape[0]
else:
lw = bbf * totalcount
# single versus multiple bounding box colors
if np.isscalar(bbox_color):
ec = [bbox_color] * xy_df.shape[0]
else:
ec = lblcolors
# single versus multiple point sizes
if add_points:
if np.isscalar(point_size):
ps1 = [point_size] * xy_df.shape[0]
ps2 = None
else:
ps1 = psmin + totalcount * psf
ps2 = psmin + lblcount * psf
# now draw the bounding boxes & add points
loc = 0
for _, me in xy_df.iterrows():
# add bounding box (detection)
rect = patches.Rectangle(
xy=(me['xmin'], me['ymin']),
width=me['xmax'] - me['xmin'],
height=me['ymax'] - me['ymin'],
edgecolor=ec[loc], linewidth=lw[loc],
facecolor='none', linestyle='-')
ax.add_patch(rect)
loc += 1
# Add the fov contour if given
if fovcont is not None:
rect = patches.Rectangle(
xy=(fov['xmin'], fov['ymin']),
width=fov['xmax'] - fov['xmin'],
height=fov['ymax'] - fov['ymin'],
edgecolor=fov['color'],
linewidth=0.2 if add_points else 0.2,
facecolor='none', linestyle='--')
ax.add_patch(rect)
# add point (classification) -- looks ugly (yikes!)
if add_points:
x = np.int32(xy_df.loc[:, "xmin"] + (
xy_df.loc[:, "xmax"] - xy_df.loc[:, "xmin"]) / 2)
y = np.int32(xy_df.loc[:, "ymin"] + (
xy_df.loc[:, "ymax"] - xy_df.loc[:, "ymin"]) / 2)
# main seed points (totals)
ax.scatter(
x, y, color=point_color, alpha=1., marker='.',
edgecolor=None, s=ps1 ** 2,
)
# most dominant label
if ps2 is not None:
ax.scatter(
x, y, color=lblcolors, alpha=1.0, marker='.',
edgecolor=None, s=ps2 ** 2,
)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
buf = io.BytesIO()
plt.savefig(buf, format='png', pad_inches=0, dpi=dpi)
buf.seek(0)
rgb_vis = np.uint8(Image.open(buf))[..., :3]
plt.close()
return rgb_vis
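# Illustrative call sketch (names are assumptions): `rgb` would come from
# _get_rgb_for_interrater() and `xy_df` would hold anchor bounding boxes with
# xmin/ymin/xmax/ymax columns already scaled to the image magnification. The frame is
# copied because the function flips the y coordinates in place.
#
#   vis = _visualize_bboxes_on_rgb(
#       rgb, xy_df.copy(), bbox_linewidth=0.3, bbox_color='#525150',
#       add_points=True, point_size=6)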
def _restrict_to_user_subset(all_conts, who='NP'):
keep = all_conts.loc[:, "user"].isin(ir.who[who])
return all_conts.loc[keep, :]
def _get_annlocs_for_same_user(all_conts):
"""Find annotation indices that cannot appear in the same cluster.
In this case, annotations from the same user and evaluation set
CANNOT map to the same anchor since the user's intention, by definition,
is to annotate two separate nuclei.
"""
dontlink = []
for evalset in ir.EVALSET_NAMES:
evalconts = all_conts.loc[all_conts.loc[:, "evalset"] == evalset, :]
for user in ir.All:
user_elements = evalconts.loc[evalconts.loc[:, "user"] == user, :]
if user_elements.shape[0] > 0:
dontlink.append(np.argwhere(np.in1d(
all_conts.index, user_elements.index))[:, 0])
return dontlink
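# Minimal sketch on synthetic data (assumes ir.All is an ordered sequence with at least
# two participants and ir.EVALSET_NAMES is non-empty): two annotations by the same
# participant in the same evaluation set form one "do-not-link" group, so constrained
# clustering can never merge them into a single anchor.
def _demo_dontlink_groups():
    fake = DataFrame({
        'user': [ir.All[0], ir.All[0], ir.All[1]],
        'evalset': [ir.EVALSET_NAMES[0]] * 3,
    }, index=['ann0', 'ann1', 'ann2'])
    # -> roughly [array([0, 1]), array([2])]
    return _get_annlocs_for_same_user(fake)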
def get_anchors_for_iou_thresh(
fovinfo, dbcon, who='All', min_iou=0.25, constrained=True, gc=None,
add_relative_bounds=True, MPP=0.2, MAG=None):
"""Get all nucleus anchors for a range of clustering IOU thresholds.
Note that there are two types of analyses we want to investigate:
1. Can we use concordance with SPs as proxy for core set performance
2. What would happen if we only use NP annotations for everything
See ctme/nuclei/ideas for details ...
Q1: Since the CORE_SET is based on the same methodology as EVAL_SET_3,
how accurate are NP annotations compared to SPs/JPs on EVAL_SET_3. i.e.
if SPs/JPs did the annotations under the exact same conditions?
Q2: How accurate are the annotations made by SPs & NPs, in absolute terms,
when compared against our "unbiased" approach (SPs/JPs on EVAL_SET_1)??
Parameters
----------
fovinfo: dict
keys are names of participants who annotated the fov, each entry
        is also a dict containing metadata about the fov by this particular
participant
dbcon: connected sqlalchemy database
who: str
participant experience level to keep
min_iou: float
min_iou to map annotations to same cluster
constrained: bool
gc: authenticated girder client
add_relative_bounds: bool
MPP: float or None
MAG: float or None
Returns
-------
dict
"""
out = dict()
    # Read all contours. Since the masks are small and there are < 30 nuclei,
    # this is OK and it saves us IO cost
all_conts, out['fov_conts'] = _get_all_contours_for_eval_fov(
fovinfo=fovinfo, dbcon=dbcon)
# we get the absolute bounds of the FOV, including all annotations
# from all users and evaluation sets
if add_relative_bounds:
assert gc is not None, "You must provide girder client"
out['bounds'], out['slide_id'] = _get_bounds_for_eval_fov(
gc=gc, dbcon=dbcon, elementid=all_conts.index[0],
MPP=MPP, MAG=MAG, all_conts=all_conts)
# ** Restrict to subsets of users **
if who != 'All':
all_conts = _restrict_to_user_subset(all_conts, who=who)
# If empty FOV, return None
if all_conts.shape[0] < 1:
return
# differentiate between bboxes and polygons
all_conts.loc[:, 'type'] = all_conts.loc[:, 'coords_x'].apply(
lambda x: 'polyline' if len(x.split(',')) > 5 else 'rectangle')
# get ious for all potential nuclei bboxes
iou = _get_iou_for_fov(all_conts)
# init
all_anchors = {evset: DataFrame() for evset in ir.EVALSET_NAMES}
# This is a list of people who annotated the FOV for both the main
# evaluation set as well as the controls
participants = {
k: list(fovinfo[k].keys()) if k in fovinfo.keys() else []
for k in ir.EVALSET_NAMES
}
# get absolute coordinates of medoids
cluster_anchors = _get_anchors_for_fov_by_clustering(
all_conts=all_conts, iou=iou, min_iou=min_iou,
constrained=constrained, participants=participants)
for evset in ir.EVALSET_NAMES:
# relative to the fov at desired MPP
if add_relative_bounds:
relative_anchors = _get_relative_anchors(
cluster_medoids=cluster_anchors[evset].loc[
:, ["xmin", "ymin", "xmax", "ymax"]],
bounds=out['bounds'])
for col in relative_anchors.columns:
cluster_anchors[evset].loc[:, f'{col}_relative'] = \
relative_anchors.loc[:, col]
all_anchors[evset] = concat(
(all_anchors[evset], cluster_anchors[evset]), axis=0,
sort=False, ignore_index=True)
out['all_anchors'] = all_anchors
return out
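# Minimal call sketch (the girder client `gc`, the sqlalchemy connection `dbcon` and the
# `fovinfo` dict are assumptions supplied by the surrounding pipeline):
#
#   out = get_anchors_for_iou_thresh(
#       fovinfo=fovinfo, dbcon=dbcon, who='All', min_iou=0.25,
#       constrained=True, gc=gc, add_relative_bounds=True, MPP=0.2)
#   if out is not None:
#       ucontrol_anchors = out['all_anchors']['U-control']  # one DataFrame per eval set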
def _get_fovnames_with_commonest_nobservers(
fovmetas: DataFrame, who: str) -> Tuple[List[str], int]:
""""Get the names of FOVs with commonest number of observers.
Most of the time, this will be all observers (i.e. 6 of 6 pathologists),
but there may be exceptions. It is important to only keep FOVs with the
same number of observers so that the ease of detection of nuclei is
    not confounded by the number who actually annotated the FOV, i.e. so that
we know that a nucleus detected by only 4 pathologists is because it is
a tough nucleus, not because only 4 pathologists happened to annotate
this particular FOV.
"""
nperfov = {
row['fovname']: len([
p for p in row['participants'].split(',')
if p in ir.who[who]
])
for _, row in fovmetas.iterrows()
}
tally = Counter([v for _, v in nperfov.items()])
# the higher of the two most common n_observers per FOV
maxn = max(tally.most_common()[0][0], tally.most_common()[1][0])
fovnames = [k for k, v in nperfov.items() if v == maxn]
return fovnames, maxn
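# Worked example (illustrative): if 10 FOVs were annotated by 6 pathologists each and
# 2 FOVs by only 5, the tally is {6: 10, 5: 2}, maxn resolves to 6, and only the ten
# six-observer FOVs are returned -- detection difficulty is then not confounded by how
# many participants happened to annotate each FOV.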
def get_fovs_annotated_by_almost_everyone(
dbcon_anchors, unbiased_is_truth: bool, whoistruth: str,
evalset: str, who: str, get_anchors: bool = True) -> Dict[str, Any]:
"""Get FOVs with most observers.
See docstring for _get_fovnames_with_commonest_nobservers().
"""
# get fov metadata
    fovmetas = read_sql_query(f"""
        SELECT "fovname", "participants_{evalset}" AS "participants"
        FROM "fov_meta"
        WHERE "participants_{evalset}" NOT NULL
    ;""", dbcon_anchors)
"""
This script analyzes Python imports.
It accepts:
    * the path to a CSV file with FQ names,
    * the path to the folder where the stats will be saved,
    * the path to a CSV file with projects labeled by Python version.
For each unique import name, the number of projects in which it occurs is counted.
It is also possible to group the statistics by Python language version.
This script is a wrapper over import_directives_analysis.py.
"""
import argparse
import logging
from pathlib import Path
from typing import Optional, Tuple
from analysis.dependencies.fq_names_tree import build_fq_name_tree_decomposition
from analysis.dependencies.import_directives_analysis import (
fq_names_groups_to_stats,
fq_names_to_dict,
get_prefix_by_package,
group_fq_names_by,
)
from analysis.dependencies.python.imports_column import ImportsColumn
import pandas as pd
logging.basicConfig(level=logging.INFO)
# These are the default Python arguments passed to import_directives_analysis.py
MAX_PACKAGE_LEN = 3
MAX_SUBPACKAGES = 10000
MAX_LEAF_SUBPACKAGES = 0.8
MIN_OCCURRENCE = 100
MAX_OCCURRENCE = 1500
MAX_U_OCCURRENCE = 500
def configure_parser(parser: argparse.ArgumentParser):
parser.add_argument(
'--input',
type=lambda value: Path(value).absolute(),
help='path to csv file with fq names',
required=True,
)
parser.add_argument(
'--output',
type=lambda value: Path(value).absolute(),
help='path to the folder where to save the stats',
required=True,
)
parser.add_argument(
'--python-versions',
type=lambda value: Path(value).absolute(),
help='path to the csv file with labeled projects by python version',
)
def collect_stats(imports: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
total_stats = imports.value_counts([ImportsColumn.IMPORT.value])
total_stats = total_stats.reset_index(name=ImportsColumn.COUNT.value)
total_stats.rename(columns={ImportsColumn.IMPORT.value: ImportsColumn.FQ_NAME.value}, inplace=True)
fq_names = imports[ImportsColumn.IMPORT.value].tolist()
fq_names_dict = fq_names_to_dict(fq_names)
_, sub_roots = build_fq_name_tree_decomposition(
fq_names_dict=fq_names_dict,
max_subpackages=MAX_SUBPACKAGES,
max_leaf_subpackages=MAX_LEAF_SUBPACKAGES,
min_occurrence=MIN_OCCURRENCE,
max_occurrence=MAX_OCCURRENCE,
max_u_occurrence=MAX_U_OCCURRENCE,
)
packages = [sub_root.full_name for sub_root in sub_roots]
fq_names_by_package = group_fq_names_by(
fq_names=fq_names,
group_by_function=lambda fq_name: get_prefix_by_package(fq_name, packages, MAX_PACKAGE_LEN),
)
fq_names_by_package_stats = fq_names_groups_to_stats(fq_names_by_package)
package_stats = pd.DataFrame(
fq_names_by_package_stats.items(),
columns=[ImportsColumn.FQ_NAME.value, ImportsColumn.COUNT.value],
)
logging.info(f'Processed {len(total_stats)} unique FQ names and {len(package_stats)} unique package names.')
return total_stats, package_stats
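# Sketch of the expected output (illustrative, not executed): for an input frame whose
# import column holds FQ names, `total_stats` pairs each unique FQ name with its
# occurrence count (e.g. "numpy" -> 2 if two rows import numpy), while `package_stats`
# aggregates the same counts over the decomposed package prefixes.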
def main():
parser = argparse.ArgumentParser()
configure_parser(parser)
args = parser.parse_args()
input_path: Path = args.input
output_path: Path = args.output
python_versions_path: Optional[Path] = args.python_versions
output_path.mkdir(parents=True, exist_ok=True)
    imports = pd.read_csv(input_path, keep_default_na=False)
import pandas as pd
import numpy as np
import os
from scipy.stats import skew
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
import warnings
warnings.filterwarnings('ignore')
class TitanicData:
def __init__(self, file_path):
self.data = pd.read_csv(os.path.join(file_path,'train.csv'))
self.testset = pd.read_csv(os.path.join(file_path,'test.csv'))
self.scaler = StandardScaler()
self.num_features = ['Pclass','Age','SibSp','Parch','Fare']
def transform(self, **kwargs):
# args
scaling = False if 'scaling' not in kwargs.keys() else kwargs['scaling']
# pre-processing
train = self.processing(self.data, **kwargs)
x_train = train.drop('Survived', axis=1)
y_train = train.Survived
# scaling
if scaling:
x_train[self.num_features] = self.scaler.fit_transform(x_train[self.num_features])
# test set
if isinstance(self.testset, pd.DataFrame):
x_test = self.processing(self.testset, **kwargs)
# scaling
if scaling:
x_test[self.num_features] = self.scaler.transform(x_test[self.num_features])
return (x_train.values, y_train.values), x_test.values
return x_train.values, y_train.values
def processing(self, raw_data, **kwargs):
data = raw_data.copy()
# args
dummy_dropfirst = True if 'dummy_dropfirst' not in kwargs.keys() else kwargs['dummy_dropfirst']
        # Convert Sex to 0/1
sex_dict = {
'male': 0,
'female': 1
}
data['Sex'] = data.Sex.map(sex_dict)
        # Extract the title from Name and classify it into ['Mr','Mrs','Miss','Master','Other']
        # Background on titles: https://en.wikipedia.org/wiki/Honorific
data.Name = data.Name.str.split('.', expand=True).iloc[:,0].str.split(',', expand=True).iloc[:,1].str.strip()
major = data.Name.value_counts().iloc[:4]
data.Name = data.Name.apply(lambda x : 'Other' if x not in major.index else x)
        # Replace missing Age with the median age for each title
age_median = dict(data.groupby('Name').Age.median())
for k, v in age_median.items():
data.loc[data.Age.isnull() & (data.Name==k), 'Age'] = v
        # Not sure why, but Age has decimal points, so cast to int
data['Age'] = data.Age.astype(int)
        # Fill missing Embarked with the most frequent value
data.loc[data.Embarked.isnull(), 'Embarked'] = data.Embarked.mode().values
        # Fare has large outliers, so apply a log1p transform
data.loc[data.Fare.isnull(), 'Fare'] = data.Fare.median()
data['Fare'] = np.log1p(data.Fare)
        # Ticket and Cabin are not used
data = data.drop(['Ticket','Cabin'], axis=1)
        # Drop PassengerId
data = data.drop('PassengerId', axis=1)
# dummy transform
data = pd.get_dummies(data, drop_first=dummy_dropfirst)
return data
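# Minimal usage sketch ('./titanic' is an assumption -- any folder holding the Kaggle
# train.csv / test.csv pair works):
#
#   titanic = TitanicData('./titanic')
#   (x_train, y_train), x_test = titanic.transform(scaling=True, dummy_dropfirst=True)
#   # x_train / x_test are plain numpy arrays ready for any sklearn estimator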
class HousePriceData:
def __init__(self, file_path):
self.data = pd.read_csv(os.path.join(file_path,'train.csv'))
self.testset = pd.read_csv(os.path.join(file_path,'test.csv'))
self.scaler = StandardScaler()
self.imputer = SimpleImputer()
self.encoder = OneHotEncoder()
self.num_features = None
self.missing_features = None
self.skew_features = None
self.remove_features = []
def transform(self, **kwargs):
# args
scaling = False if 'scaling' not in kwargs.keys() else kwargs['scaling']
# pre-processing
train = self.processing(self.data, **kwargs)
x_train = train.drop('SalePrice', axis=1)
y_train = train.SalePrice
# test set
x_test = self.processing(self.testset, training=False, **kwargs)
        # dummy transform: concatenate train and test so the dummy columns stay aligned
        data = pd.concat([x_train, x_test], axis=0)
import argparse
import os
import pickle
from pathlib import Path
import gym
import gym_chrome_dino
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from network.pg import PG
from utils.show_img import show_img
class GameSession:
def __init__(
self, session_env, initial_epsilon=0.1, final_epsilon=0.0001, observe=False,
steps_to_observe=100, frames_to_action=1, frames_to_anneal=100000, replay_memory_size=50000,
minibatch_size=16, n_actions=3, gamma=0.99, steps_to_save=1000,
loss_path='./models/prl/loss.csv', scores_path='./models/prl/scores.csv',
actions_path='./models/prl/actions.csv', q_values_path='./models/prl/q_values.csv',
):
self.session_env = session_env
self.initial_epsilon = initial_epsilon
self.final_epsilon = final_epsilon
self.observe = observe
self.steps_to_observe = steps_to_observe
self.frames_to_action = frames_to_action
self.frames_to_anneal = frames_to_anneal
self.replay_memory_size = replay_memory_size
self.minibatch_size = minibatch_size
self.n_actions = n_actions
self.gamma = gamma
self.steps_to_save = steps_to_save
self.loss_path = loss_path
self.scores_path = scores_path
self.actions_path = actions_path
self.q_values_path = q_values_path
# Display the processed image on screen using openCV, implemented using python coroutine
self._display = show_img()
# Initialize the display coroutine
self._display.__next__()
# Initialize the required files
self.loss_df = pd \
.read_csv(loss_path) if os.path.isfile(loss_path) else pd.DataFrame(columns=['loss'])
self.scores_df = pd \
            .read_csv(scores_path) if os.path.isfile(scores_path) else pd.DataFrame(columns=['scores'])
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list( | pd.date_range("20130101", periods=1) | pandas.date_range |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
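# rng2 is decreasing, so forward-fill indexing against it is ill-defined and must raise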
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
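# 1370745748 seconds since the epoch corresponds to 2013-06-09 02:42:28 UTC, the base timestamp used below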
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array whose elements match parsing each value with Timestamp()
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
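# 13 stamps fall in [00:00, 01:00] on each of the 4 full days, plus the lone 1/5 00:00 endpoint;
# dropping the start removes the 00:00 stamp from all 5 days, dropping the end removes 01:00 from the 4 full days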
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
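# the 22:00-09:00 window spans 11 hours: 12 * 11 + 1 = 133 stamps per full day (evening plus morning run), times 4 days, plus the final 1/5 00:00 stamp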
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
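# DateOffset(minute=5) pins the minute field instead of stepping forward, so naive range generation would never terminate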
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
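# rng (1:45-2:30) followed immediately by rng2 (2:35-3:20) covers exactly the 20 stamps of rng3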
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
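# by default the index wraps the int64 data without copying, so mutating arr is visible through asi8; copy=True must break that link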
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
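# minute one averages 0..59 with 10..29 missing -> 34.5; minute two averages 60..99 -> 79.5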
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
# the index is moved back a day by the timezone conversion from UTC to Pacific
expected_index = pd.period_range(start=start, end=end, freq='D') - 1
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
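# whole seconds plus 1 ms, 1 us and 1 ns; the trailing nanosecond must survive DatetimeIndex construction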
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
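# ~24414 Hz sampling: int(1e9 / 24414) == 40960 ns per sample, matching the '40960N' freq asserted in assert_index_parameters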
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
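# idx1 already appears in sorted order, so factorizing with and without sort=True should agree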
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
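# BQ-FEB stamps are business quarter ends (Feb/May/Aug/Nov); only the February stamp is a year end and none are period starts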
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# is_month_start and friends raise ValueError for CustomBusinessDay; CustomBusinessDay itself requires numpy >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, dti)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')  # API: pandas.date_range
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from dl_for_env import call_model
# Global variables
# HMAX_NORMALIZE = 10
# INITIAL_ENERGY = 1000
PLANT_DIM = 1
EFF_PUMP = 0.9
EFF_ERD = 0.8
FLOW_FEED = 1000
lookback_size = 7
# transaction fee: 1/1000 reasonable percentage
# TRANSACTION_FEE_PERCENT = 0.001
REWARD_SCALING = 1e-2
class BWTPEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, df, day=0):
# super(StockEnv, self).__init__()
# date increment
self.day = day
self.df = df
# action_space normalization and the shape is PLANT_DIM
self.action_space = spaces.Box(low=-1, high=1, shape=(PLANT_DIM,))
# Shape = 4: [specific energy] + [permeate flowrate] + [actual energy (Total)] + [pressure]
self.observation_space = spaces.Box(low=0, high=np.inf, shape=(4,))
# load data from a pandas dataframe
self.data = self.df.loc[self.day, :]
# termination
self.terminal = False
# save the total number of trades
self.trades = 0
# initalize state
self.state = [0.0] + \
[self.data.flowrate] + \
[self.data.Total] + \
[self.data.pressure]
# [0] * PLANT_DIM + \
# initialize reward and cost
self.reward = 0
self.cost = 0
self.energy_difference=0
self.total_energy_difference = 0
self.total_reward = 0
# self.total_actual_energy = 0
self.total_optimize_energy = 0
# memorize the total value, total rewards
self.energy_memory = []
self.rewards_memory = []
self.energy_difference_memory = []
self.total_energy_difference_memory = []
self.action_container = [float(self.df['pressure'].mean()) for _ in range(lookback_size)]
self.total_actual_energy = 0
# self.total_actual_energy = 1502.87059974546
#
# def _increase_pressure(self, index, action):
#
# if self.state[index + PLANT_DIM + 1] > 0:
# self.state[1] = call_model(action)
# self.state[3] = action
# # energy consumption calculation
# self.state[0] += \
# (((self.state[1]*self.state[3])/EFF_PUMP)-(((self.state[3]-EFF_ERD*(self.state[3]-3))*(FLOW_FEED-self.state[1]))/EFF_PUMP))/self.state[3]
# self.state[index + PLANT_DIM + 1] -= min(abs(action), self.state[index + PLANT_DIM + 1])
# self.trades += 1
# # # update transaction costs
# self.cost += self.state[0]*1000
#
# else:
# pass
#
# def _decrease_pressure(self, index, action):
#
# available_amount = self.state[0] // self.state[index + 1]
# # update balance
# self.state[1] = call_model(action)
# self.state[3] = action
# energy consumption calculation
# self.state[0] += \
# (((self.state[1]*self.state[3])/EFF_PUMP)-(((self.state[3]-EFF_ERD*(self.state[3]-3))*(FLOW_FEED-self.state[1]))/EFF_PUMP))/self.state[3]
# # update held shares
# self.state[index + PLANT_DIM + 1] += min(available_amount, action)
# # # update transaction costs
# self.cost += self.state[0]*1000
# self.trades += 1
def change_pressure(self, action):
# self.state[0] += \
# (((self.state[1] * self.state[3]) / EFF_PUMP) \
# - (((self.state[3] - EFF_ERD * (self.state[3] - 3)) * (FLOW_FEED - self.state[1])) / EFF_PUMP)) / \
# self.state[3]
# initial_engergy = self.state[0]
self.action_container = np.roll(self.action_container, -1)
self.action_container[-1] = action
actual_energy = self.state[2]
# self.total_actual_energy += self.state[2]
# update balance
self.state[1] = call_model(self.action_container)*13
# self.state[3] = action[-1]
# energy consumption calculation
self.state[0] = \
(((self.state[1]*self.state[3])/EFF_PUMP)+(((self.state[3]-EFF_ERD*(self.state[3]-3))*(FLOW_FEED-self.state[1]))/EFF_PUMP))/(self.state[1]*36)
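# Added note (hedged interpretation, inferred from variable names only): this
# appears to compute the specific energy consumption of the plant. The first
# term is the pump work on the permeate stream (flow * pressure / EFF_PUMP);
# the second term is the residual work on the reject stream
# (FLOW_FEED - permeate flow) after the energy-recovery device reclaims
# EFF_ERD of its pressure, with a 3-unit pressure loss assumed by the original
# author; the sum is divided by (permeate flow * 36), where 36 looks like a
# unit-conversion constant carried over from the original code.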
# if action >0:
# if 0 < self.state[0] < 1:
# self.state[3] = action[-1]
# optimize_energy = self.state[0]
# self.total_optimize_energy += optimize_energy
# self.total_actual_energy += actual_energy
#
# self.energy_difference = actual_energy- optimize_energy
# # self.total_energy_difference = self.total_actual_energy - self.total_optimize_energy
#
# # update held shares
# #self.state[index + PLANT_DIM + 1] += min(available_amount, action)
# # # update transaction costs
# self.cost += self.state[0]*10
# self.trades += 1
#
# else:
# self.action_container[-1] = self.state[3]
#
# optimize_energy = actual_energy
# self.total_optimize_energy += optimize_energy
# self.total_actual_energy += actual_energy
#
# self.energy_difference = 0
# # self.total_energy_difference = self.total_actual_energy - self.total_optimize_energy
#
# # update held shares
# #self.state[index + PLANT_DIM + 1] += min(available_amount, action)
# # # update transaction costs
# self.cost += self.state[0]*10
# else:
# self.action_container[-1] = self.state[3]
# optimize_energy = actual_energy
# self.total_optimize_energy += optimize_energy
# self.total_actual_energy += actual_energy
# self.energy_difference = 0
# # self.total_energy_difference = self.total_actual_energy - self.total_optimize_energy
#
# # update held shares
# #self.state[index + PLANT_DIM + 1] += min(available_amount, action)
# # # update transaction costs
# self.cost += self.state[0]*10
self.state[3] = action[-1]
optimize_energy = self.state[0]
self.total_optimize_energy += optimize_energy
self.total_actual_energy += actual_energy
self.energy_difference = actual_energy - optimize_energy
# self.total_energy_difference = self.total_actual_energy - self.total_optimize_energy
# update held shares
#self.state[index + PLANT_DIM + 1] += min(available_amount, action)
# # update transaction costs
self.cost += self.state[0]*10
self.trades += 1
def step(self, actions):
# actions is a list of floats of length=1
self.terminal = self.day >= len(self.df.index.unique()) - 1
if self.terminal:
#plt.plot(self.energy_memory, 'r')
#plt.savefig('account_value.png')
#plt.close()
# end_total_energy = self.state[0] + \
# sum(np.array(self.state[1:(PLANT_DIM + 1)]) * np.array(
# self.state[(PLANT_DIM + 1):(PLANT_DIM * 2 + 1)]))
end_energy = self.state[0]
self.total_energy_difference = self.total_actual_energy - self.total_optimize_energy
print("previous_total_energy:{}".format(self.energy_memory[0]))
print("end_energy:{}".format(end_energy))
print("total_actual_energy:{}".format(self.total_actual_energy))
print("total_optimize_energy:{}".format(self.total_optimize_energy))
print("total_energy_difference:{}".format(self.total_energy_difference))
df_total_value = pd.DataFrame(self.energy_difference_memory)
df_total_value.to_csv('energy_difference_memory_2.csv')
df_total_value = pd.DataFrame(self.energy_memory)
df_total_value.to_csv('energy_memory_2.csv')
df_total_value = pd.DataFrame(self.total_energy_difference_memory)
df_total_value.to_csv('total_energy_difference_memory_2.csv')
print("reward:{}".format(self.reward))
# print("total_reward:{}".format(self.total_reward)) # - INITIAL_ENERGY
# print("total_reward:{}".format(self.state[0] + sum(np.array(self.state[1:(PLANT_DIM + 1)]) * np.array(
# self.state[(PLANT_DIM + 1):(PLANT_DIM * 2 + 1)])))) # - INITIAL_ENERGY
print("total_cost: ", self.cost)
print("total trades: ", self.trades)
# df_total_value.columns = ['account_value']
# df_total_value['daily_return'] = df_total_value.pct_change(1)
# if df_total_value['daily_return'].std() != 0:
# sharpe = (252 ** 0.5) * df_total_value['daily_return'].mean() / \
# df_total_value['daily_return'].std()
# print("Sharpe: ", sharpe)
df_rewards = pd.DataFrame(self.rewards_memory)  # API: pandas.DataFrame
import pandas as pd
import numpy as np
import random
def generate_variants(seq):
# generate a list of all possible variants of a sequence
variants = []
variant_nt = []
variant_pos = []
nts = ['A', 'C', 'T', 'G']
for i, seq_nt in enumerate(seq):
for N in nts:
if seq_nt != N:
new_seq = seq[:i] + N + seq[i+1:]
variants.append(new_seq)
variant_nt.append(N)
variant_pos.append(i)
variant_df = pd.DataFrame({'variant': variants, 'nt': variant_nt, 'pos': variant_pos})
return variant_df
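# Hedged usage sketch (added for illustration; not part of the original
# module): for a sequence of length L, generate_variants returns 3 * L rows,
# one per alternative nucleotide at each position. Guarded so it only runs
# when this file is executed directly.
if __name__ == "__main__":
    _demo_variants = generate_variants("ACGT")
    assert len(_demo_variants) == 12  # 4 positions * 3 alternative bases
    print(_demo_variants.head())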
def mutagenize_seq(guide, model):
variant_df = generate_variants(guide)
variant_predictions = model.predict_seqs(variant_df.variant)
variant_df['prediction'] = variant_predictions
original_prediction = model.predict_seqs([guide])
variant_df['delta'] = variant_df.prediction - original_prediction
summarized_df = variant_df.groupby('pos').agg({'delta': 'mean'}).reset_index()
summarized_df['nt'] = list(guide)
summarized_df['importance'] = np.absolute(summarized_df.delta)
summarized_df['pos'] = summarized_df.pos + 1
summarized_df['context'] = guide
return summarized_df
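# Hedged sketch (illustration only): the real `model` object is defined
# elsewhere in this project; the `_ToyModel` stub below is an assumption that
# only mimics the interface these functions rely on -- a `predict_seqs` method
# mapping an iterable of sequences to scores, and an `enzyme` dict with a
# 'context_length' entry (used by mutagenize_model).
class _ToyModel:
    enzyme = {"context_length": 20}

    def predict_seqs(self, seqs):
        # toy score: GC fraction of each sequence
        return np.array([(s.count("G") + s.count("C")) / len(s) for s in seqs])

if __name__ == "__main__":
    print(mutagenize_seq("ACGTACGTACGTACGTACGT", _ToyModel()).head())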
def mutagenize_model(model, resamples):
nts = ['A', 'C', 'T', 'G']
seq_len = model.enzyme['context_length']
mutation_list = []
sequence_list = []
sequence = ''.join(random.choice(nts) for i in range(seq_len))
mutation = {'sequence': sequence, 'reference': '', 'nt': '','position': float('nan')}
mutation_list.append(mutation)
sequence_list.append(sequence)
mutation_list = []
for i in range(resamples):
while sequence in sequence_list:
last_sequence = sequence
position = random.randint(0, seq_len - 1)
nt = random.choice(nts)
sequence = sequence[:position] + nt + sequence[(position + 1):]
mutation = {'sequence': sequence, 'reference': last_sequence, 'nt': nt, 'position': position + 1}
mutation_list.append(mutation)
sequence_list.append(sequence)
sequence_df = pd.DataFrame(mutation_list)
sequence_predictions = model.predict_seqs(sequence_df.sequence)
prediction_df = pd.DataFrame({'sequence': sequence_df.sequence, 'prediction': sequence_predictions})  # API: pandas.DataFrame
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))  # API: pandas.compat.zip
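# Added note (hedged): this helper pairs each positional argument with the
# corresponding unit in `intervals`, e.g. timedelta64(1, 2) builds
# np.timedelta64(1, 'D') + np.timedelta64(2, 'h').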
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)  # API: pandas.util.testing.assert_index_equal
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
This file contains utility functions for creating features for time
series forecasting applications. All functions defined assume that
there is no missing data.
"""
import calendar
import itertools
import pandas as pd
import numpy as np
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
from fclib.feature_engineering.utils import is_datetime_like
# 0: Monday, 2: T/W/TR, 4: F, 5:SA, 6: S
WEEK_DAY_TYPE_MAP = {1: 2, 3: 2} # Map for converting Wednesday and
# Thursday to have the same code as Tuesday
HOLIDAY_CODE = 7
SEMI_HOLIDAY_CODE = 8 # days before and after a holiday
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def day_type(datetime_col, holiday_col=None, semi_holiday_offset=timedelta(days=1)):
"""
Convert datetime_col to 7 day types
0: Monday
2: Tuesday, Wednesday, and Thursday
4: Friday
5: Saturday
6: Sunday
7: Holiday
8: Days before and after a holiday
Args:
datetime_col: Datetime column.
holiday_col: Holiday code column. Default value None.
semi_holiday_offset: Time difference between the date before (or after)
the holiday and the holiday. Default value timedelta(days=1).
Returns:
A numpy array containing converted datatime_col into day types.
"""
datetype = pd.DataFrame({"DayType": datetime_col.dt.dayofweek})
datetype.replace({"DayType": WEEK_DAY_TYPE_MAP}, inplace=True)
if holiday_col is not None:
holiday_mask = holiday_col > 0
datetype.loc[holiday_mask, "DayType"] = HOLIDAY_CODE
# Create a temporary Date column to calculate dates near the holidays
datetype["Date"] = pd.to_datetime(datetime_col.dt.date, format=DATETIME_FORMAT)
holiday_dates = set(datetype.loc[holiday_mask, "Date"])
semi_holiday_dates = [
pd.date_range(start=d - semi_holiday_offset, end=d + semi_holiday_offset, freq="D") for d in holiday_dates
]
# Flatten the list of lists
semi_holiday_dates = [d for dates in semi_holiday_dates for d in dates]
semi_holiday_dates = set(semi_holiday_dates)
semi_holiday_dates = semi_holiday_dates.difference(holiday_dates)
datetype.loc[datetype["Date"].isin(semi_holiday_dates), "DayType"] = SEMI_HOLIDAY_CODE
return datetype["DayType"].values
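# Hedged usage sketch (added; the dates and holiday flags are assumed example
# inputs, not from the original module). Guarded so it only runs when this
# file is executed directly: one week of daily timestamps with Wednesday
# flagged as a holiday, so Tuesday and Thursday become semi-holidays.
if __name__ == "__main__":
    _days = pd.Series(pd.date_range("2020-01-06", periods=7, freq="D"))
    _holiday_flags = pd.Series([0, 0, 1, 0, 0, 0, 0])
    # expected codes: [0, 8, 7, 8, 4, 5, 6]
    print(day_type(_days, holiday_col=_holiday_flags))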
def hour_of_day(datetime_col):
"""Returns the hour from a datetime column."""
return datetime_col.dt.hour
def time_of_year(datetime_col):
"""
Time of year is a cyclic variable that indicates the annual position and
repeats each year. Within each year it increases linearly from 0 on
January 1 at 00:00 to 1 on December 31 at 23:00, so the values are
normalized to the range [0, 1].
Args:
datetime_col: Datetime column.
Returns:
A numpy array containing converted datatime_col into time of year.
"""
time_of_year = pd.DataFrame(
{"DayOfYear": datetime_col.dt.dayofyear, "HourOfDay": datetime_col.dt.hour, "Year": datetime_col.dt.year}
)
time_of_year["TimeOfYear"] = (time_of_year["DayOfYear"] - 1) * 24 + time_of_year["HourOfDay"]
time_of_year["YearLength"] = time_of_year["Year"].apply(lambda y: 366 if calendar.isleap(y) else 365)
time_of_year["TimeOfYear"] = time_of_year["TimeOfYear"] / (time_of_year["YearLength"] * 24 - 1)
return time_of_year["TimeOfYear"].values
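# Worked check (added, illustrative): in a non-leap year the last hour maps to
# exactly 1.0 because (364 * 24 + 23) / (365 * 24 - 1) == 1.
if __name__ == "__main__":
    _toy = pd.Series(pd.to_datetime(["2019-01-01 00:00:00", "2019-12-31 23:00:00"]))
    print(time_of_year(_toy))  # expected: [0.0, 1.0]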
def week_of_year(datetime_col):
"""Returns the week from a datetime column."""
return datetime_col.dt.week
def week_of_month(date_time):
"""Returns the week of the month for a specified date.
Args:
date_time (Datetime or pd.Series): Input date(s)
Returns:
wom (Integer): Week of the month of the input date
"""
def _week_of_month(date_time):
from math import ceil
first_day = date_time.replace(day=1)
dom = date_time.day
adjusted_dom = dom + first_day.weekday()
wom = int(ceil(adjusted_dom / 7.0))
return wom
if isinstance(date_time, pd.Series):
return date_time.apply(lambda x: _week_of_month(x))
else:
return _week_of_month(date_time)
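# Quick illustration (added, hedged; dates are assumed examples): January 1,
# 2020 is a Wednesday, so January 5 (Sunday) is still in week 1 while
# January 6 (Monday) starts week 2.
if __name__ == "__main__":
    print(week_of_month(pd.Timestamp("2020-01-05")))  # 1
    print(week_of_month(pd.Timestamp("2020-01-06")))  # 2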
def month_of_year(date_time_col):
"""Returns the month from a datetime column."""
return date_time_col.dt.month
def day_of_week(date_time_col):
"""Returns the day of week from a datetime column."""
return date_time_col.dt.dayofweek
def day_of_month(date_time_col):
"""Returns the day of month from a datetime column."""
return date_time_col.dt.day
def day_of_year(date_time_col):
"""Returns the day of year from a datetime column."""
return date_time_col.dt.dayofyear
def encoded_month_of_year(month_of_year):
"""
Create one hot encoding of month of year.
"""
month_of_year = pd.get_dummies(month_of_year, prefix="MonthOfYear")
return month_of_year
def encoded_day_of_week(day_of_week):
"""
Create one hot encoding of day_of_week.
"""
day_of_week = pd.get_dummies(day_of_week, prefix="DayOfWeek")
return day_of_week
def encoded_day_of_month(day_of_month):
"""
Create one hot encoding of day_of_month.
"""
day_of_month = pd.get_dummies(day_of_month, prefix="DayOfMonth")
return day_of_month
def encoded_day_of_year(day_of_year):
"""
Create one hot encoding of day_of_year.
"""
day_of_year = pd.get_dummies(day_of_year)
return day_of_year
def encoded_hour_of_day(hour_of_day):
"""
Create one hot encoding of hour_of_day.
"""
hour_of_day = pd.get_dummies(hour_of_day, prefix="HourOfDay")
return hour_of_day
def encoded_week_of_year(week_of_year):
"""
Create one hot encoding of week_of_year.
"""
week_of_year = pd.get_dummies(week_of_year, prefix="WeekOfYear")
return week_of_year
def normalized_current_year(datetime_col, min_year, max_year):
"""
Temporal feature indicating the position of the year of a record in the
entire time period under consideration, normalized to be between 0 and 1.
Args:
datetime_col: Datetime column.
min_year: minimum value of year.
max_year: maximum value of year.
Returns:
float: the position of the current year in the min_year:max_year range
"""
year = datetime_col.dt.year
if max_year != min_year:
current_year = (year - min_year) / (max_year - min_year)
elif max_year == min_year:
current_year = 0
return current_year
def normalized_current_date(datetime_col, min_date, max_date):
"""
Temporal feature indicating the position of the date of a record in the
entire time period under consideration, normalized to be between 0 and 1.
Args:
datetime_col: Datetime column.
min_date: minimum value of date.
max_date: maximum value of date.
Returns:
float: the position of the current date in the min_date:max_date range
"""
date = datetime_col.dt.date
current_date = (date - min_date).apply(lambda x: x.days)
if max_date != min_date:
current_date = current_date / (max_date - min_date).days
elif max_date == min_date:
current_date = 0
return current_date
def normalized_current_datehour(datetime_col, min_datehour, max_datehour):
"""
Temporal feature indicating the position of the hour of a record in the
entire time period under consideration, normalized to be between 0 and 1.
Args:
datetime_col: Datetime column.
min_datehour: minimum value of datehour.
max_datehour: maximum value of datehour.
Returns:
float: the position of the current datehour in the min_datehour:max_datehour range
"""
current_datehour = (datetime_col - min_datehour).apply(lambda x: x.days * 24 + x.seconds / 3600)
max_min_diff = max_datehour - min_datehour
if max_min_diff != 0:
current_datehour = current_datehour / (max_min_diff.days * 24 + max_min_diff.seconds / 3600)
elif max_min_diff == 0:
current_datehour = 0
return current_datehour
def normalized_columns(datetime_col, value_col, mode="log", output_colname="normalized_columns"):
"""
    Creates a column normalized either as the log of the input column divided by its
    global average, or min-max scaled using its minimum and maximum.
Args:
datetime_col: Datetime column.
value_col: Value column to be normalized.
mode: Normalization mode,
accepted values are 'log' and 'minmax'. Default value 'log'.
Returns:
Normalized value column.
"""
if not is_datetime_like(datetime_col):
datetime_col = pd.to_datetime(datetime_col, format=DATETIME_FORMAT)
df = pd.DataFrame({"Datetime": datetime_col, "value": value_col})
df.set_index("Datetime", inplace=True)
if not df.index.is_monotonic:
df.sort_index(inplace=True)
if mode == "log":
mean_value = df["value"].mean()
if mean_value != 0:
df[output_colname] = np.log(df["value"] / mean_value)
elif mean_value == 0:
df[output_colname] = 0
elif mode == "minmax":
min_value = min(df["value"])
max_value = max(df["value"])
if min_value != max_value:
df[output_colname] = (df["value"] - min_value) / (max_value - min_value)
elif min_value == max_value:
df[output_colname] = 0
else:
raise ValueError("Valid values for mode are 'log' and 'minmax'")
return df[[output_colname]]
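# Usage sketch for normalized_columns(); the column values below are illustrative only.
#
#   ts = pd.Series(pd.date_range("2022-01-01", periods=3, freq="H"))
#   load = pd.Series([100.0, 150.0, 200.0])
#   normalized_columns(ts, load, mode="minmax", output_colname="load_norm")
#   # -> DataFrame indexed by Datetime with load_norm values [0.0, 0.5, 1.0]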
def fourier_approximation(t, n, period):
"""
Generic helper function to create Fourier Series at different harmonies (n) and periods.
Args:
t: Datetime column.
n: Harmonies, n=0, 1, 2, 3,...
period: Period of the datetime variable t.
Returns:
float: Sine component
float: Cosine component
"""
x = n * 2 * np.pi * t / period
x_sin = np.sin(x)
x_cos = np.cos(x)
return x_sin, x_cos
def annual_fourier(datetime_col, n_harmonics):
"""
Creates Annual Fourier Series at different harmonies (n).
Args:
datetime_col: Datetime column.
n_harmonics: Harmonies, n=0, 1, 2, 3,...
Returns:
dict: Output dictionary containing sine and cosine components of
the Fourier series for all harmonies.
"""
day_of_year = datetime_col.dt.dayofyear
output_dict = {}
for n in range(1, n_harmonics + 1):
sin, cos = fourier_approximation(day_of_year, n, 365.24)
output_dict["annual_sin_" + str(n)] = sin
output_dict["annual_cos_" + str(n)] = cos
return output_dict
def weekly_fourier(datetime_col, n_harmonics):
"""
Creates Weekly Fourier Series at different harmonies (n).
Args:
datetime_col: Datetime column.
n_harmonics: Harmonies, n=0, 1, 2, 3,...
Returns:
dict: Output dictionary containing sine and cosine components of
the Fourier series for all harmonies.
"""
day_of_week = datetime_col.dt.dayofweek + 1
output_dict = {}
for n in range(1, n_harmonics + 1):
sin, cos = fourier_approximation(day_of_week, n, 7)
output_dict["weekly_sin_" + str(n)] = sin
output_dict["weekly_cos_" + str(n)] = cos
return output_dict
def daily_fourier(datetime_col, n_harmonics):
"""
Creates Daily Fourier Series at different harmonies (n).
Args:
datetime_col: Datetime column.
n_harmonics: Harmonies, n=0, 1, 2, 3,...
Returns:
dict: Output dictionary containing sine and cosine components of
the Fourier series for all harmonies.
"""
hour_of_day = datetime_col.dt.hour + 1
output_dict = {}
for n in range(1, n_harmonics + 1):
sin, cos = fourier_approximation(hour_of_day, n, 24)
output_dict["daily_sin_" + str(n)] = sin
output_dict["daily_cos_" + str(n)] = cos
return output_dict
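# Sketch of combining the Fourier helpers above into a single feature frame; the
# variable names are illustrative, not part of this module.
#
#   ts = pd.Series(pd.date_range("2022-01-01", periods=24 * 14, freq="H"))
#   features = {}
#   features.update(annual_fourier(ts, n_harmonics=3))
#   features.update(weekly_fourier(ts, n_harmonics=3))
#   features.update(daily_fourier(ts, n_harmonics=2))
#   fourier_df = pd.DataFrame(features)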
def same_week_day_hour_lag(
datetime_col, value_col, n_years=3, week_window=1, agg_func="mean", q=None, output_colname="SameWeekHourLag"
):
"""
Creates a lag feature by calculating quantiles, mean and std of values of and
around the same week, same day of week, and same hour of day, of previous years.
Args:
datetime_col: Datetime column.
value_col: Feature value column to create lag feature from.
n_years: Number of previous years data to use. Default value 3.
week_window: Number of weeks before and after the same week to use,
which should help reduce noise in the data. Default value 1.
agg_func: Aggregation function to apply on multiple previous values,
accepted values are 'mean', 'quantile', 'std'. Default value 'mean'.
q: If agg_func is 'quantile', taking value between 0 and 1.
output_colname: name of the output lag feature column.
Default value 'SameWeekHourLag'.
Returns:
pd.DataFrame: data frame containing the newly created lag
feature as a column.
"""
if not is_datetime_like(datetime_col):
datetime_col = pd.to_datetime(datetime_col, format=DATETIME_FORMAT)
min_time_stamp = min(datetime_col)
max_time_stamp = max(datetime_col)
df = pd.DataFrame({"Datetime": datetime_col, "value": value_col})
df.set_index("Datetime", inplace=True)
week_lag_base = 52
week_lag_last_year = list(range(week_lag_base - week_window, week_lag_base + week_window + 1))
week_lag_all = []
for y in range(n_years):
week_lag_all += [x + y * 52 for x in week_lag_last_year]
week_lag_cols = []
for w in week_lag_all:
if (max_time_stamp - timedelta(weeks=w)) >= min_time_stamp:
col_name = "week_lag_" + str(w)
week_lag_cols.append(col_name)
lag_datetime = df.index.get_level_values(0) - timedelta(weeks=w)
valid_lag_mask = lag_datetime >= min_time_stamp
df[col_name] = np.nan
df.loc[valid_lag_mask, col_name] = df.loc[lag_datetime[valid_lag_mask], "value"].values
# Additional aggregation options will be added as needed
if agg_func == "mean" and q is None:
df[output_colname] = round(df[week_lag_cols].mean(axis=1))
elif agg_func == "quantile" and q is not None:
df[output_colname] = round(df[week_lag_cols].quantile(q, axis=1))
elif agg_func == "std" and q is None:
df[output_colname] = round(df[week_lag_cols].std(axis=1))
return df[[output_colname]]
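# Hedged usage sketch: ``timestamps`` and ``demand`` are assumed to be hourly series
# spanning several years, so that the 52-week lags referenced above actually exist.
#
#   lag_mean = same_week_day_hour_lag(timestamps, demand, n_years=2, week_window=1)
#   lag_p90 = same_week_day_hour_lag(timestamps, demand, agg_func="quantile", q=0.9,
#                                    output_colname="SameWeekHourLagP90")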
def same_day_hour_lag(
datetime_col, value_col, n_years=3, day_window=1, agg_func="mean", q=None, output_colname="SameDayHourLag"
):
"""
Creates a lag feature by calculating quantiles, mean, and std of values of
and around the same day of year, and same hour of day, of previous years.
Args:
datetime_col: Datetime column.
value_col: Feature value column to create lag feature from.
n_years: Number of previous years data to use. Default value 3.
day_window: Number of days before and after the same day to use,
which should help reduce noise in the data. Default value 1.
agg_func: Aggregation function to apply on multiple previous values,
accepted values are 'mean', 'quantile', 'std'. Default value 'mean'.
q: If agg_func is 'quantile', taking value between 0 and 1.
output_colname: name of the output lag feature column.
Default value 'SameDayHourLag'.
Returns:
pd.DataFrame: data frame containing the newly created lag
feature as a column.
"""
if not is_datetime_like(datetime_col):
datetime_col = pd.to_datetime(datetime_col, format=DATETIME_FORMAT)
min_time_stamp = min(datetime_col)
max_time_stamp = max(datetime_col)
    df = pd.DataFrame({"Datetime": datetime_col, "value": value_col})
import requests
import pandas as pd
import numpy as np
from tempfile import NamedTemporaryFile
import os
import subprocess
from astropy.io import fits
import matplotlib.pyplot as plt
from . import spacegeometry
def getChandraObs(
obsID,
fileList
):
pass
def getHeaderInfo(
key,
header
):
catKeys = list(header.keys())
foundKey = False
for index in range(len(header)):
if key == header[index]:
catKey = catKeys[index]
unitKey = catKey.replace('TYPE', 'UNIT')
if unitKey == catKey:
unitKey = catKey.replace('TYP', 'UNIT')
if unitKey in header:
columnUnit = header[unitKey]
else:
columnUnit = None
columnIndexDict = {
'index': index,
'key': catKey
}
if columnUnit:
columnIndexDict['unit'] = columnUnit
foundKey = True
if not foundKey:
raise ValueError('Did not find columns %s in local catalog.' %key)
return columnIndexDict
def plotLocalCatalog(
catalogName='xmmsl2_clean.fits',
dirpath='/home/joel/Documents/pythonDev/research/pulsarJPDAF/pulsarData/xray_catalogs/',
fluxKey='FLUX_B8'
):
hdulist = fits.open(dirpath + catalogName)
catalogHeader = hdulist[1].header
catalogData = hdulist[1].data
hdulist.close()
minFlux = np.min(catalogData[fluxKey])
scaledFlux = np.array(catalogData[fluxKey] - minFlux)
maxFlux = np.max(scaledFlux)
scaledFlux = scaledFlux/maxFlux
plt.figure()
for index in range(len(catalogData)):
plt.scatter(catalogData[index]['RA'], catalogData[index]['DEC'], s=scaledFlux[index])
plt.show(block=False)
return
def localCatalog_coneSearch(
RA,
DEC,
FOV,
catalogName='xmmsl2_clean.fits',
dirpath='/home/joel/Documents/pythonDev/research/pulsarJPDAF/pulsarData/xray_catalogs/',
removeNaNs=True,
fluxKey='FLUX_B8',
extentKey='EXT_B8',
raKey='RA',
decKey='DEC',
srcNameKey='UNIQUE_SRCNAME'
):
hdulist = fits.open(dirpath + catalogName)
catalogHeader = hdulist[1].header
catalogData = hdulist[1].data
hdulist.close()
columns = [srcNameKey, raKey, decKey, fluxKey, extentKey]
savedColumns = []
columnIndexDict = {}
catKeys = list(catalogHeader.keys())
for index in range(len(catalogHeader)):
for column in columns:
if column == catalogHeader[index]:
catKey = catKeys[index]
unitKey = catKey.replace('TYPE', 'UNIT')
if unitKey in catalogHeader:
columnUnit = catalogHeader[unitKey]
else:
columnUnit = None
columnIndexDict[column] = {
'index': index,
'key': catKey
}
if columnUnit:
columnIndexDict[column]['unit'] = columnUnit
columns.remove(column)
savedColumns.append(column)
if columns:
raise ValueError('Did not find columns %s in local catalog.' %columns)
if columnIndexDict[raKey]['unit'] == 'rad':
raConversionFactor = 1
elif columnIndexDict[raKey]['unit'] == 'degrees' or columnIndexDict[raKey]['unit'] == 'degree':
raConversionFactor = np.pi / 180.0
if columnIndexDict[decKey]['unit'] == 'rad':
decConversionFactor = 1
elif columnIndexDict[decKey]['unit'] == 'degrees' or columnIndexDict[decKey]['unit'] == 'degree':
decConversionFactor = np.pi/180.0
if RA['unit'] == 'rad':
referenceRA = RA['value']
elif RA['unit'] == 'degrees':
referenceRA = RA['value'] * np.pi / 180.0
else:
        raise ValueError('Unrecognized RA units %s' % RA['unit'])
if DEC['unit'] == 'rad':
referenceDec = DEC['value']
elif DEC['unit'] == 'degrees':
referenceDec = DEC['value'] * np.pi / 180.0
else:
        raise ValueError('Unrecognized Dec units %s' % DEC['unit'])
if FOV['unit'] == 'rad':
FOVVal = FOV['value']
elif FOV['unit'] == 'degrees':
FOVVal = FOV['value'] * np.pi / 180.0
else:
        raise ValueError('Unrecognized FOV units %s' % FOV['unit'])
referenceUnitVector = spacegeometry.sidUnitVec(
referenceRA,
referenceDec
)
mySourceDF = pd.DataFrame(columns=savedColumns)
for source in catalogData:
sourceUnitVector = spacegeometry.sidUnitVec(
source[raKey] * raConversionFactor,
source[decKey] * decConversionFactor
)
angularDiff = np.arccos(referenceUnitVector.dot(sourceUnitVector))
if angularDiff < (FOVVal/2):
mySrcDict = {}
skipVal = False
for columnName, columnInfo in columnIndexDict.items():
if not skipVal:
if 'unit' in columnInfo:
mySrcDict[columnName] = {
'value': source[columnName],
'unit': columnInfo['unit'].replace('cm2', 'cm^2')
}
else:
mySrcDict[columnName] = source[columnName]
if removeNaNs:
try:
skipVal = np.isnan(source[columnName])
except:
skipVal = False
if not skipVal:
mySourceDF = mySourceDF.append(mySrcDict, ignore_index=True)
return mySourceDF
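# Illustrative call; the default catalog path above is machine-specific, so a real
# run would likely override ``dirpath``. The coordinates here are arbitrary.
#
#   sources = localCatalog_coneSearch(
#       RA={'value': 83.63, 'unit': 'degrees'},
#       DEC={'value': 22.01, 'unit': 'degrees'},
#       FOV={'value': 1.0, 'unit': 'degrees'},
#   )
#   print(sources[['UNIQUE_SRCNAME', 'FLUX_B8']])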
def xamin_coneSearch(
RA,
DEC,
FOV,
angleUnits='degrees',
catalog='xray',
removeNullFlux=True,
fluxKey='flux'
):
if angleUnits == 'degrees':
FOVArcmin = FOV * 60
elif angleUnits == 'radians':
FOVArcmin = FOV * 3437.75
elif angleUnits == 'arc':
FOVArcmin = FOV
dirpath = '/home/joel/Documents/pythonDev/modules/ModularFilter/modest/utils'
fieldCommand = 'fields=name,ra,dec,%s' % fluxKey
myCommand = ['java',
'-jar',
dirpath + '/users.jar',
'table=%s' %catalog,
'position=\'%s, %s\'' % (RA, DEC),
'radius=%s' % FOVArcmin,
fieldCommand]
print(myCommand)
# myQuery += ('table=%s' % catalog)
# myQuery += ('position=\'%s, %s\'' % (RA, DEC))
# myQuery += ('radius=%s' % FOV)
# subprocess.call(['java', '-jar', 'users.jar'], env=env)
# process = subprocess.Popen(['java', '-jar', 'users.jar'], stdout=subprocess.PIPE)
process = subprocess.Popen(myCommand, stdout=subprocess.PIPE)
output = process.stdout
print(output)
outputDF = pd.read_csv(output, sep='|', comment='#').dropna(how='any')
outputDF.columns = outputDF.columns.str.strip()
outputDF = outputDF.rename(columns={str.lower(fluxKey):'flux'})
print(outputDF)
for row in range(len(outputDF)):
try:
outputDF.set_value(row, 'flux', outputDF.loc[row]['flux'])
except:
if removeNullFlux is True:
outputDF.drop(row, inplace=True)
# print('Dropping row %i' %(row))
outputDF.reset_index()
return(outputDF)
def chandraPSC_coneSearch(
RA,
DEC,
FOV,
FOVUnits='degrees',
minSignificance=0
):
if FOVUnits == 'degrees':
FOVArcmin = FOV * 60
elif FOVUnits == 'radians':
FOVArcmin = FOV * 3437.75
elif FOVUnits == 'arcmins':
FOVArcmin = FOV
else:
        raise ValueError('Unrecognized unit for FOV. Use either degrees, radians, or arcmins.')
baseQuery=(
'http://cda.cfa.harvard.edu/csccli/getProperties?query='
'SELECT m.name, m.ra, m.dec, m.flux_aper_b, m.significance ' +
'FROM master_source m ' +
'WHERE (' +
'dbo.cone_distance(m.ra,m.dec,%s,%s)<=%s'
%(RA, DEC, FOVArcmin)
)
if minSignificance > 0:
baseQuery = (
baseQuery +
'AND m.significance > %s)'
%minSignificance
)
else:
baseQuery = baseQuery + ')'
print(baseQuery)
response=requests.get(baseQuery)
# t = TemporaryFile()
# with open('./tmp', 'wb') as f:
# f.write(response.content)
with NamedTemporaryFile(mode='wb', delete=False) as f:
f.write(response.content)
    resultsDF = pd.read_csv(f.name, sep='\t', comment='#')
    return resultsDF
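# Example query against the Chandra Source Catalog CLI endpoint (assumes network
# access and that the service is available); the coordinates are arbitrary.
#
#   df = chandraPSC_coneSearch(RA=83.63, DEC=22.01, FOV=0.5, FOVUnits='degrees',
#                              minSignificance=5)
#   print(df.head())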
import logging
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager, asynccontextmanager
from typing import (
Union, Sequence, List, Tuple,
Type, ContextManager, AsyncContextManager,
Iterator, AsyncIterator,
)
import pandas as pd
import aioodbc
import MySQLdb.connections
logger = logging.getLogger(__name__)
class Connection:
def __init__(self, conn, cursor):
self._conn = conn
self._cursor = cursor
if isinstance(conn, MySQLdb.connections.Connection):
self._autocommit = conn.get_autocommit()
else:
self._autocommit = conn.autocommit
def commit(self) -> None:
assert not self._autocommit
self._conn.commit()
def rollback(self) -> None:
assert not self._autocommit
self._conn.rollback()
def _execute_(self, sql, *args, **kwargs) -> None:
logger.debug('executing SQL statement\n%s\nargs:\n%s\nkwargs:\n%s',
sql, str(args), str(kwargs))
self._cursor.execute(sql, *args, **kwargs)
def _execute(self, sql: str, *args, **kwargs) -> None:
try:
self._execute_(sql, *args, **kwargs)
except:
msg = f'Failed to execute SQL\n{sql}\nargs:\n{args}\nkwargs:\n{kwargs}'
logger.exception(msg)
raise
def read(self, sql: str, *args, **kwargs) -> 'Connection':
self._execute(sql, *args, **kwargs)
return self
def iterrows(self) -> Iterator[Tuple]:
"""
Iterate over rows in result after calling ``read``,
one row at a time.
"""
return iter(self._cursor)
def iterbatches(self, batch_size: int) -> Iterator[List[Tuple]]:
"""
This method is called after ``read`` to iter over results, one batch at a time.
"""
while True:
rows = self.fetchmany(batch_size)
if rows:
yield rows
else:
break
@property
def headers(self) -> List[str]:
"""
Return column headers after calling ``read``.
This can be used to augment the returns of `fetchone`, `fetchmany`, `fetchall`,
which return values only, i.e. they do not return column headers.
"""
return [x[0] for x in self._cursor.description]
def fetchone(self) -> Union[Tuple, None]:
"""
Fetch the next row of a query result set, returning a single sequence,
or None when no more data is available.
An Error (or subclass) exception is raised if the previous call to
``read`` did not produce any result set or no call to ``read`` was issued yet.
"""
return self._cursor.fetchone()
def fetchone_pandas(self) -> pd.DataFrame:
row = self.fetchone()
if not row:
return pd.DataFrame(columns=self.headers)
return pd.DataFrame.from_records([row], columns=self.headers)
def fetchmany(self, n: int) -> Sequence[Tuple]:
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples).
An empty sequence is returned when no more rows are available.
An Error (or subclass) exception is raised if the previous call to
``read`` did not produce any result set or no call to ``read`` was issued yet.
"""
return self._cursor.fetchmany(n)
def fetchmany_pandas(self, n: int) -> pd.DataFrame:
rows = self.fetchmany(n)
if not rows:
return pd.DataFrame(columns=self.headers)
return pd.DataFrame.from_records(rows, columns=self.headers)
def fetchall(self) -> List[Tuple]:
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a tuple of tuples).
Note that the cursor's arraysize attribute can affect the performance of this operation.
An Error (or subclass) exception is raised if the previous call to
``read`` did not produce any result set or no call to ``read`` was issued yet.
"""
return self._cursor.fetchall()
def fetchall_pandas(self) -> pd.DataFrame:
"""
This method is called after ``read`` to fetch the results as a ``pandas.DataFrame``.
Warning: do not use this if the result contains a large number of rows.
"""
rows = self.fetchall()
if not rows:
return pd.DataFrame(columns=self.headers)
return pd.DataFrame.from_records(list(rows), columns=self.headers)
def write(self, sql: str, *args, **kwargs) -> None:
self._execute(sql, *args, **kwargs)
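# Minimal sketch of driving Connection with a DB-API connection/cursor pair; the
# connection parameters, query and ``process`` callable are placeholders.
#
#   conn = MySQLdb.connect(host="localhost", user="user", passwd="...", db="mydb")
#   cursor = conn.cursor()
#   wrapper = Connection(conn, cursor)
#   wrapper.read("SELECT id, name FROM users WHERE active = %s", (1,))
#   for batch in wrapper.iterbatches(500):
#       process(batch)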
class AsyncConnection:
def __init__(self,
conn: aioodbc.connection.Connection,
cursor: aioodbc.cursor.Cursor):
self._conn = conn
self._cursor = cursor
self._autocommit = conn.autocommit
async def commit(self) -> None:
assert not self._autocommit
await self._conn.commit()
async def rollback(self) -> None:
assert not self._autocommit
await self._conn.rollback()
async def _execute_(self, sql, *args, **kwargs):
logger.debug('executing SQL statement:\n%s\nargs:\n%s\nkwargs:\n%s',
sql, args, kwargs)
await self._cursor.execute(sql, *args, **kwargs)
async def _execute(self, sql: str, *args, **kwargs) -> None:
try:
await self._execute_(sql, *args, **kwargs)
except:
msg = f'Failed to execute SQL:\n{sql}\nargs:\n{args}\nkwargs:\n{kwargs}'
logger.exception(msg)
raise
async def read(self, sql: str, *args, **kwargs) -> 'AsyncConnection':
await self._execute(sql, *args, **kwargs)
return self
    async def iterrows(self) -> AsyncIterator[Tuple]:
        # aioodbc cursors are async-iterable; return the cursor so callers can use ``async for``.
        return self._cursor
async def iterbatches(self, batch_size: int) -> AsyncIterator[List[Tuple]]:
while True:
rows = await self.fetchmany(batch_size)
if rows:
yield rows
else:
break
@property
def headers(self) -> List[str]:
return [x[0] for x in self._cursor.description]
async def fetchone(self) -> Union[Tuple, None]:
return await self._cursor.fetchone()
async def fetchone_pandas(self) -> pd.DataFrame:
row = await self.fetchone()
if not row:
            return pd.DataFrame(columns=self.headers)
        return pd.DataFrame.from_records([row], columns=self.headers)
from kafka import KafkaConsumer
from pathlib import Path
from requests import post, exceptions, put
from requests.auth import HTTPBasicAuth
from logger import log
import tensorflow as tf
import pandas as pd
import json
import os
HOME_SERVICE_AUTH = HTTPBasicAuth('model-builder', 'secret')
MODELS_BASE_PATH = '/models' # './models' for local development
class ModelBuilder:
def __init__(self):
self.categories_dict = {}
self.consumer = KafkaConsumer(
'UserData',
bootstrap_servers='kafka:9092',
api_version=(0, 10, 0),
value_deserializer=lambda m: json.loads(m.decode('utf-8'))
)
log('Model builder initialized')
def __get_category(self, category_id):
return self.categories_dict[category_id]
@staticmethod
def __split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
@staticmethod
def __build_model(vocab_size, embedding_dim, rnn_units, batch_size):
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, None]),
tf.keras.layers.GRU(rnn_units,
return_sequences=True,
stateful=True,
recurrent_initializer='glorot_uniform'),
tf.keras.layers.Dense(vocab_size)
])
return model
@staticmethod
def __loss_function(labels, logits):
return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def generate_and_save_model_from_csv(self, model_id, csv_file_path):
log('Building files structure')
model_dir = MODELS_BASE_PATH + '/' + model_id + '_model'
os.makedirs(model_dir, exist_ok=True)
header_list = ["timestamp", "sensor", "action"]
sensors_df = pd.read_csv(csv_file_path, names=header_list)
sensors_df['sensors_with_action'] = sensors_df['sensor'] + '_' + sensors_df['action']
sensors_df['sensors_with_action_code'] = pd.Categorical(sensors_df['sensors_with_action'])
categories = pd.Categorical(sensors_df['sensors_with_action_code'])
CATEGORIES_AMOUNT = len(categories.categories.values)
log(f'There is {CATEGORIES_AMOUNT} unique categories')
self.categories_dict = dict(enumerate(sensors_df['sensors_with_action_code'].cat.categories))
category_dict_file = Path(model_dir + '/category_dict.json')
category_dict_file.write_text(json.dumps(self.categories_dict, indent=4) + '\n')
sensors_df['sensors_with_action_code'] = sensors_df.sensors_with_action_code.cat.codes
dataset_length = len(sensors_df)
log(f'Event records amount {dataset_length}')
Y_data = sensors_df.iloc[1:dataset_length]
        Y_data = pd.concat([Y_data, sensors_df.iloc[0:1]], ignore_index=True)
# -*- coding: utf-8 -*-
"""
Save atlas propagation results using registration with dense displacement fields predicted by networks.
@author: <NAME>
@version: 0.1
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from core import model_ddf_mvmm_label_base as model
from core import image_dataset as image_utils
from core import utils, losses
# import nibabel as nib
import numpy as np
import os
import logging
import tensorflow as tf
import pandas as pd
import argparse
from datetime import datetime
t = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
parser = argparse.ArgumentParser(description='Start atlas propagation on test dataset!')
parser.add_argument('--time', default=t, type=str,
help='The current time to save test predictions.')
parser.add_argument('--space', type=str, default='commonspace2',
choices=['commonspace1', 'commonspace2'],
help='The commonspace type for test data.')
parser.add_argument('--spacing', default='2mm', type=str, choices=['1mm', '2mm'],
help='The spatial spacing of the network inputs and the dense displacement fields.')
parser.add_argument('--dropout', default=0, type=float, help='The dropout probability for network prediction.')
parser.add_argument('--dropout_type', default='regular', type=str, choices=['regular', 'spatial'],
help='dropout type')
parser.add_argument('--model_path', type=str, default=None,
help='The model path to restore the network parameters for network prediction.')
parser.add_argument('--latest_filename', default='best_checkpoint', type=str,
help='latest filename to restore the model')
parser.add_argument('--trial', default=0, type=int, help='which trial to load the model')
parser.add_argument('--cuda_device', default=0, type=int,
help='The cuda device for network prediction.')
parser.add_argument('--atlas_search_path', default='../../../dataset/training_mr_20_commonspace2/*.nii.gz', type=str,
help='The search pattern to find all training atlas images, labels and probabilities.')
parser.add_argument('--atlas_modality', default='mr', choices=['mr', 'ct'],
help="the modality of atlas image, either 'mr' or 'ct'")
parser.add_argument('--a_min', default=None, type=float, help='min value for intensity clipping')
parser.add_argument('--a_max', default=None, type=float, help='max value for intensity clipping')
parser.add_argument('--image_suffix', default='image.nii.gz', type=str,
help='suffix pattern for the images')
parser.add_argument('--label_suffix', default='label.nii.gz', type=str,
help='suffix pattern for the labels')
parser.add_argument('--weight_suffix', default=None, type=None,
help='suffix pattern for the weights')
parser.add_argument('--crop_patch', default=True, type=bool,
help='whether to crop patches of the test data')
parser.add_argument('--patch_center', default=None, nargs='+',
help='The customized patch center, default is None.')
parser.add_argument('--patch_size', default=(80, 80, 80), type=int, nargs='+',
help='The size of the cropped patch.')
parser.add_argument('--original_size', default=(112, 96, 112), type=int, nargs=3,
help='original size of the saved image')
parser.add_argument('--num_blocks', default=(1, 1, 1), type=int, nargs='+',
help='The number of blocks of input along each axis, default is (1, 1, 1).')
parser.add_argument('--method', default='unet',
choices=['ddf_label', 'ddf_label_v0', 'unet'], type=str,
help='the method of network to infer the dense displacement fields')
parser.add_argument('--num_down_blocks', default=4, type=int,
help='the number of downside convolution blocks of the network')
parser.add_argument('--ddf_levels', default=None, type=int, nargs='*',
help='the network levels where to extract dense displacement fields')
parser.add_argument('--features_root', default=32, type=int,
help='number of features of the first convolution layer')
parser.add_argument('--normalizer', default=None, type=str,
choices=['batch', 'group', 'layer', 'instance', 'batch_instance'],
help='type of network normalization method')
parser.add_argument('--diffeomorphism', default=False, action='store_true',
help='whether to use diffeomorphic transformations')
parser.add_argument('--int_steps', default=4, type=int,
help='number of integration steps on the velocity fields')
parser.add_argument('--cost_function', default='label_consistency',
choices=(['MvMM_negative_log-likelihood', 'label_consistency', 'multi_scale_label_consistency',
'dice', 'multi_scale_dice', 'cross_entropy', 'SSD']),
help='the type of cost function for network optimization')
parser.add_argument('--reg_stage', default='single', type=str, choices=['single', 'multi'],
help="The registration stage, either 'single' or 'multi'.")
parser.add_argument('--test_input_size', default=(112, 96, 112), type=int, nargs='+',
help='The test input size.')
parser.add_argument('--save_ddf', default=False, action='store_true',
help='whether to save displacement field into nifty files')
# parser.add_argument('--save_path', default='./', type=str,
# help="Path where to save the test results.")
args = parser.parse_args()
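# Example invocation (the script filename and paths below are placeholders, not taken
# from this repository):
#
#   python predict_atlas_propagation.py --space commonspace2 --spacing 2mm \
#       --model_path ../models/ddf_mvmm --trial 0 --cuda_device 0 \
#       --atlas_search_path '../../../dataset/training_mr_20_commonspace2/*.nii.gz'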
# determine the prediction/metrics save path and the data search path
if args.space == 'commonspace1':
save_path = os.path.join(args.model_path, 'test_predictions_commonspace1_%s' % args.spacing)
target_search_path = '../../../dataset/test_mr_40_commonspace1/*.nii.gz'
metrics_path = os.path.join(args.model_path, 'metrics_test_pairwise_commonspace1_%s.xlsx' % args.spacing)
scale_model = 1
elif args.space == 'commonspace2':
save_path = os.path.join(args.model_path, 'test_predictions_commonspace2_%s' % args.spacing)
target_search_path = '../../../dataset/test_mr_40_commonspace2/*.nii.gz'
metrics_path = os.path.join(args.model_path, 'metrics_test_pairwise_commonspace2_%s.xlsx' % args.spacing)
scale_model = 0
else:
raise Exception("The space must be either 'commonspace1' or 'commonspace2'!")
if __name__ == '__main__':
# set cuda device
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
# set working directory
print("Current working directory: %s" % os.getcwd())
os.chdir('../')
print("Working directory changed to: %s" % os.path.abspath(os.getcwd()))
if not os.path.exists(save_path):
logging.info("Allocating '{:}'".format(save_path))
os.makedirs(save_path)
if 'model_trained' in args.model_path and 'trial' in args.model_path:
model_path = args.model_path
else:
model_dir = os.path.join(args.model_path, 'trial_%s' % args.trial, 'model_trained')
ckpt = tf.train.get_checkpoint_state(model_dir, latest_filename=args.latest_filename)
model_path = ckpt.model_checkpoint_path
test_model_data_provider = image_utils.ImageDataProvider(
target_search_path='../../../dataset/test_mr_40_commonspace2/*.nii.gz',
atlas_search_path=args.atlas_search_path,
atlas_modality=args.atlas_modality,
a_min=args.a_min,
a_max=args.a_max,
image_suffix=args.image_suffix,
label_suffix=args.label_suffix,
train_phase=False,
n_atlas=1,
crop_patch=args.crop_patch,
patch_center=args.patch_center,
patch_size=args.patch_size,
crop_roi=False,
image_normalization=True,
channels=1,
n_class=8,
label_intensity=(0, 205, 420, 500, 550, 600, 820, 850),
scale=0,
num_blocks=args.num_blocks,
image_name_index_begin=-38,
stage=args.reg_stage)
test_data_provider = image_utils.ImageDataProvider(target_search_path=target_search_path,
atlas_search_path=args.atlas_search_path,
atlas_modality=args.atlas_modality,
a_min=args.a_min,
a_max=args.a_max,
image_suffix=args.image_suffix,
label_suffix=args.label_suffix,
weight_suffix=args.weight_suffix,
train_phase=False,
n_atlas=1,
crop_patch=False,
# patch_center=[i*2**scale_model
# for i in args.patch_center]
# if args.patch_center else None,
# patch_size=[i*2**scale_model
# for i in args.patch_size],
crop_roi=False,
image_normalization=False,
channels=1,
n_class=8,
label_intensity=(0, 205, 420, 500, 550, 600, 820, 850),
scale=0,
num_blocks=args.num_blocks,
stage=args.reg_stage,
image_name_index_begin=-38)
logging.info("Number of target-atlas pairs: %s" % len(test_data_provider))
with tf.Graph().as_default():
net = model.NetForPrediction(n_blocks=args.num_blocks,
test_input_size=args.test_input_size,
input_scale=scale_model,
input_size=args.patch_size,
channels=1,
n_class=2,
test_n_class=8,
n_atlas=1,
n_subtypes=(2, 1),
method=args.method,
features_root=args.features_root,
normalizer=args.normalizer,
num_down_blocks=args.num_down_blocks,
dropout_type=args.dropout_type,
ddf_levels=args.ddf_levels,
diffeomorphism=args.diffeomorphism,
int_steps=args.int_steps,
cost_kwargs={'cost_name': args.cost_function,
'regularizer': [None, 'bending_energy'],
'regularization_coefficient': [0., 1.]})
# add number of negative Jacobians
BendingEnergy = losses.LocalDisplacementEnergy('bending')
jacobian_det = BendingEnergy.compute_jacobian_determinant(net.ddf)
num_neg_jacob = tf.math.count_nonzero(tf.less_equal(jacobian_det, 0), dtype=tf.float32,
name='negative_jacobians_number')
setattr(net, 'num_neg_jacob', num_neg_jacob)
# remove duplication of names
frame_index = utils.remove_duplicates([os.path.split(pair_names[0])[-1]
for pair_names in test_data_provider.target_atlas_image_names])
frame_columns = utils.remove_duplicates([os.path.split(pair_names[1][0])[-1][-39:]
for pair_names in test_data_provider.target_atlas_image_names])
# list the metrics that need saving
metrics_to_save = {'Dice': np.empty([len(frame_index), len(frame_columns)]),
'Jaccard': np.empty([len(frame_index), len(frame_columns)]),
'Myocardial Dice': np.empty([len(frame_index), len(frame_columns)]),
'LA Dice': np.empty([len(frame_index), len(frame_columns)]),
'LV Dice': np.empty([len(frame_index), len(frame_columns)]),
'RA Dice': np.empty([len(frame_index), len(frame_columns)]),
'RV Dice': np.empty([len(frame_index), len(frame_columns)]),
'AO Dice': np.empty([len(frame_index), len(frame_columns)]),
'PA Dice': np.empty([len(frame_index), len(frame_columns)]),
'# Negative Jacobians': np.empty([len(frame_index), len(frame_columns)]),
}
with tf.Session(config=config) as sess:
# Initialize variables
sess.run(tf.global_variables_initializer())
# Restore model parameters from previously saved model
net.restore(sess, model_path, var_list=net.variables_to_restore)
for idx, name in enumerate(test_data_provider.target_atlas_image_names):
target_name = os.path.split(test_data_provider.target_atlas_image_names[idx][0])[-1]
atlas_names = '-*-'.join([os.path.split(atlas_name)[-1] for atlas_name in test_data_provider.target_atlas_image_names[idx][1]])
if args.space == 'commonspace1':
assert os.path.split(
test_model_data_provider.target_atlas_image_names[idx][0])[-1] == target_name.replace(
'commonspace1', 'commonspace2')
assert '-*-'.join(
[os.path.split(atlas_name)[-1]
for atlas_name in test_model_data_provider.target_atlas_image_names[idx][1]]) == atlas_names.replace(
'commonspace1', 'commonspace2')
elif args.space == 'commonspace2':
assert os.path.split(test_model_data_provider.target_atlas_image_names[idx][0])[-1] == target_name
assert '-*-'.join(
[os.path.split(atlas_name)[-1]
for atlas_name in test_model_data_provider.target_atlas_image_names[idx][1]]) == atlas_names
logging.info("Fixed image: Target {:}, "
"Moving image: Atlas {:}".format(target_name, atlas_names))
# load data for network input
model_data = test_model_data_provider[idx]
# print(model_data['atlases_label'].shape, model_data['atlases_label'].dtype)
# load data for label propagation and result evaluation
test_data = test_data_provider[idx]
# perform atlas transformation
warped_atlas_image, warped_atlas_label, warped_atlas_weight,\
ddf, metrics = net.predict_scale(sess, model_data, test_data, args.dropout)
# save metrics for the current target-atlas pair
for k, v in metrics_to_save.items():
v[idx // len(frame_columns), idx % len(frame_columns)] = metrics[k]
# save output into Nifty files
# utils.save_prediction_nii(warped_atlas_image.squeeze(0), save_path, test_data_provider,
# data_type='image', name_index=idx,
# affine=test_data['target_affine'], header=test_data['target_header'],
# save_suffix=args.image_suffix, stage=args.reg_stage,
# # original_size=args.original_size
# )
utils.save_prediction_nii(warped_atlas_label.squeeze(0), save_path, test_data_provider,
data_type='label', name_index=idx,
affine=test_data['target_affine'], header=test_data['target_header'],
save_suffix=args.label_suffix, stage=args.reg_stage,
# original_size=args.original_size
)
if args.weight_suffix:
utils.save_prediction_nii(warped_atlas_weight.squeeze(0), save_path, test_data_provider,
data_type='image', name_index=idx,
affine=test_data['target_affine'], header=test_data['target_header'],
save_suffix=args.weight_suffix, save_dtype=np.float32, squeeze_channel=False,
stage=args.reg_stage,
# original_size=args.original_size
)
if args.save_ddf:
utils.save_prediction_nii(ddf.squeeze((0, -2)), save_path, test_data_provider,
data_type='vector_fields', name_index=idx,
affine=test_data['target_affine'], header=test_data['target_header'],
stage=args.reg_stage, original_size=args.original_size)
# save metrics into DataFrames
metrics_DataFrames = {}
for k, v in metrics_to_save.items():
metrics_DataFrames[k] = pd.DataFrame(v, index=frame_index, columns=frame_columns, dtype=np.float32)
# save metrics into excel files
        with pd.ExcelWriter(metrics_path) as writer:
            # write one sheet per metric
            for sheet_name, frame in metrics_DataFrames.items():
                frame.to_excel(writer, sheet_name=sheet_name)
import os
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sqlalchemy
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_predict
from imblearn.combine import SMOTETomek
import json
class testing():
def __init__(self, original_data, webappdata):
self.availdata = original_data
self.webappdata = webappdata
def get_data(self):
# data from MySQL Database
#https://dev.to/fpim/object-oriented-design-architecture-with-panda-me4 ### Reference for data cleaning and making perfect data
### Have an Connection to Database
engine = sqlalchemy.create_engine('mysql+pymysql://root:idntknwpassword#404@localhost:3306/churnapp')
avail_df = pd.read_sql_table(self.availdata, engine)
app_df = pd.read_sql_table(self.webappdata, engine)
raw_data = pd.concat([avail_df, app_df], axis=0)
return raw_data
def preprocess_input(self):
raw_data = self.get_data()
        # get_data() already returns a concatenated DataFrame, so use it directly
        # instead of passing it to pd.read_csv (which expects a path or buffer).
        df = raw_data.copy()
import argparse
import numpy as np
import pandas as pd
from settings import experiments, lambdas, functions, TRANSIENT_VALUE, RESULT_DIR
from statistics import response_time_blockchain, number_users_system, calculate_transient, mean_error, \
bar_plot_metrics, bar_plot_one_metric, plot_transient, new_plot, new_plot_transient
from utility import read_csv, extract_data_function, filter_lambda_status, phase_path, experiment_path, \
filter_transient_time, filter_fn_lambda, exists_dir, join_paths
def join_dataframe() -> pd.DataFrame:
    df_join = pd.DataFrame()
#!/usr/bin/env python3
import json
import math
import sys
import glob
import argparse
import os
from collections import namedtuple, defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import pandas
RunConfig = namedtuple("RunConfig", "scheduler fec")
RunInfo = namedtuple("RunInfo", "count total durations interrupted_segments interrupt_times bitrates segment_bitrates segment_download_times segment_filenames initial_buffering")
PALETTE_5 = sns.color_palette("muted")
PALETTE_9 = sns.color_palette("muted")
PALETTE_9[4:9] = PALETTE_9[:5]
class FIGSIZE():
BOX_M = (5, 5)
WIDE_M = (12, 5)
WIDE_L = (15, 8)
def get_mean(l):
return sum(l) / len(l)
def get_stddev(l):
mean = get_mean(l)
return math.sqrt(sum([(x - mean)**2 for x in l]) / (len(l) - 1))
def get_median(l):
return sorted(l)[len(l) // 2]
def get_z_score(x, mean, stddev):
return abs((x - mean) / stddev)
def z_filter(l, cutoff = 2.5):
mean = get_mean(l)
stddev = get_stddev(l)
return list(filter(lambda x: get_z_score(x, mean, stddev) < cutoff, l))
def fixname(name):
name = name[:3].replace("IOD", "R-IOD") + name[3:]
name = name.replace("XOR4-1", "XOR 4")
name = name.replace("XOR16-1", "XOR 16")
return name.replace("LL", "LowRTT")
def get_population_stats(p):
return ", ".join([
f"mean: {round(get_mean(p), 2)}",
f"median: {round(get_median(p), 2)}",
f"stddev: {round(get_stddev(p), 2)}",
f"min: {round(min(p), 2)}",
f"max: {round(max(p), 2)}",
f"sum: {round(sum(p), 2)}",
])
def read_log(filename, slow_start_duration = 15):
with open(filename, 'rb') as fo:
log = json.load(fo)
conf = RunConfig(log['scheduler'], log['fecConfig'])
total = 0.0
start_time = log['playback_info']['start_time']
initial_buffering = float(log['playback_info']['initial_buffering_duration'])
count = 0
durations = []
interrupted_segments = []
interrupt_times = []
for event in log['playback_info']['interruptions']['events']:
seg_no = event['segment_number']
start = event['timeframe'][0]
end = event['timeframe'][1]
duration = end - start
if start < start_time + slow_start_duration:
# ignore first few seconds of stream
continue
# some interruptions are really short, ignore?
if duration < 1e-4:
continue
# some, on the other hand, are unrealistically long. this points
# towards a crash in the server and can be ignored
if duration > 10:
continue
count += 1
durations.append(duration)
total += duration
interrupted_segments.append(seg_no)
interrupt_times.append({
"start": start - start_time,
"end": end - start_time,
"duration": duration,
})
segment_filenames = [x[0] for x in log['segment_info']]
segment_bitrates = [int(x[1]) for x in log['segment_info']]
segment_download_times = [float(x[3]) for x in log['segment_info']]
bitrates = set(segment_bitrates)
return conf, RunInfo(count, total, durations, interrupted_segments,
interrupt_times, bitrates, segment_bitrates,
segment_download_times, segment_filenames, initial_buffering)
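# Sketch: collect logs from a results directory into the per-config dict that the
# helpers below expect (the glob pattern is a placeholder).
#
#   all_infos = defaultdict(list)
#   for path in glob.glob("results/*.json"):
#       conf, info = read_log(path)
#       all_infos[conf].append(info)
#   print_stats(all_infos)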
def print_stats(allInfos):
for conf, infos in allInfos.items():
print(f"=== {conf.scheduler}, {conf.fec} ===")
print("> population size")
print(f" {len(infos)}")
print("> count")
counts = [x.count for x in infos]
print(f" {get_population_stats(counts)}")
print("> total")
totals = [x.total for x in infos]
print(f" {get_population_stats(totals)}")
print("> bitrates")
bitrates = []
for info in infos:
bitrates += info.segment_bitrates
print(f" {get_population_stats(bitrates)}")
print("> bitrate switching (up)")
bitrate_up = []
for info in infos:
count = 0
for prev, current in zip(info.segment_bitrates[:-1], info.segment_bitrates[1:]):
if prev < current:
count += 1
bitrate_up.append(count)
print(f" {get_population_stats(bitrate_up)}")
print("> bitrate switching (down)")
bitrate_down = []
for info in infos:
count = 0
for prev, current in zip(info.segment_bitrates[:-1], info.segment_bitrates[1:]):
if prev > current:
count += 1
bitrate_down.append(count)
print(f" {get_population_stats(bitrate_down)}")
def visualize_boxplot(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = z_filter([x.count for x in infos])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.set(ylim=(0, None))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig("vis-boxplot." + FORMAT)
def visualize_boxplot_split(allInfos):
data_a = {}
data_b = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data_a[key] = []
data_b[key] = []
for info in infos:
count_a = 0
count_b = 0
for interrupt_time in info.interrupt_times:
if interrupt_time["start"] < 100:
count_a+=1
else:
count_b+=1
data_a[key].append(count_a)
data_b[key].append(count_b)
# fill missing recordings with NaNs
maxlen_a = max([len(data_a[k]) for k in data_a.keys()])
maxlen_b = max([len(data_b[k]) for k in data_b.keys()])
for k, v in data_a.items():
data_a[k] = v + [float('nan')] * (maxlen_a - (len(v)))
for k, v in data_b.items():
data_b[k] = v + [float('nan')] * (maxlen_b - (len(v)))
# draw A
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_a)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot-split-a." + FORMAT)
# draw B
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_b)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='# Interruptions')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot-split-b." + FORMAT)
def visualize_distplot_interrupts(allInfos):
plt.figure(figsize=(6,5))
sns.set(style="whitegrid", palette=PALETTE_9)
data = {
"config": [],
"interrupted_segments": [],
}
configs = set()
segments_count = 0
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
configs.add(key)
for info in infos:
data["config"].extend([key]*len(info.interrupted_segments))
data["interrupted_segments"].extend(info.interrupted_segments)
segments_count = max(segments_count, len(info.segment_bitrates))
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = plt.gca()
pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
g = sns.FacetGrid(df, row="config", hue="config", aspect=10, height=1, palette=pal)
g.map(sns.kdeplot, "interrupted_segments", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2, clip=(0, segments_count))
##g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Set the subplots to overlap
#g.fig.subplots_adjust(hspace=-.25)
#g.set_titles("")
g.set(yticks=[], xlabel='Segments')
g.despine(bottom=True, left=True, right=True)
plt.savefig("vis-dist-interrupts." + FORMAT)
def visualize_distplot_interrupts_cumulative(allInfos):
plt.figure(figsize=(10,6))
sns.set(style="ticks", palette=PALETTE_5)
data = {}
configs = set()
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
configs.add(key)
data[key] = [x.count for x in infos]
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
kwargs = {"cumulative": True}
patches = []
for i, config in enumerate(configs):
ax = sns.distplot(data[config], hist=False, kde_kws=kwargs)
patches.append(mpatches.Patch(
color=sns.color_palette()[i],
label=config
))
ax.set(xlabel='# Interruptions', ylabel='')
plt.legend(handles=patches)
plt.savefig("vis-dist-amount-cumulative." + FORMAT)
def visualize_boxplot_accumulated(allInfos, split=False):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = [x.total for x in infos]
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='Accumulated Interruption Duration (s)')
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot2." + FORMAT)
def visualize_boxplot_mean(allInfos, split=False):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette="pastel")
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = []
for x in infos:
data[key] += x.durations
data[key] = z_filter(data[key])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = sns.boxplot(palette=PALETTE_9, data=df, showfliers=False)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
ax.set(xlabel='', ylabel='Interruption Duration (s)')
ax.set(ylim=(0, None))
plt.savefig("vis-boxplot3." + FORMAT)
def visualize_boxplot_mean_split(allInfos, split=False):
data_a = {}
data_b = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data_a[key] = []
data_b[key] = []
for info in infos:
durations_a = []
durations_b = []
for interrupt_time in info.interrupt_times:
if interrupt_time["start"] < 100:
durations_a.append(interrupt_time["duration"])
else:
durations_b.append(interrupt_time["duration"])
data_a[key].extend(durations_a)
data_b[key].extend(durations_b)
# fill missing recordings with NaNs
maxlen_a = max([len(data_a[k]) for k in data_a.keys()])
maxlen_b = max([len(data_b[k]) for k in data_b.keys()])
for k, v in data_a.items():
data_a[k] = v + [float('nan')] * (maxlen_a - (len(v)))
for k, v in data_b.items():
data_b[k] = v + [float('nan')] * (maxlen_b - (len(v)))
# draw A
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_a)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
    ax.set(xlabel='', ylabel='Interruption Duration (s)')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig("vis-boxplot3-split-a." + FORMAT)
# draw B
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
df = pandas.DataFrame.from_dict(data_b)
ax = sns.boxplot(palette=PALETTE_9, data=df)
#sns.swarmplot(size=2, color="0.3", linewidth=0, data=df)
    ax.set(xlabel='', ylabel='Interruption Duration (s)')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig("vis-boxplot3-split-b." + FORMAT)
def visualize_distplot_duration(allInfos):
plt.figure(figsize=(10,10))
sns.set(style="ticks", palette="pastel")
data = {
"config": [],
"duration": [],
}
configs = set()
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler} - {conf.fec}".upper())
configs.add(key)
for info in infos:
for duration in info.durations:
data["config"].append(key)
data["duration"].append(duration)
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
df = pandas.DataFrame.from_dict(data)
ax = plt.gca()
pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
g = sns.FacetGrid(df, row="config", hue="config", aspect=10, height=2, palette=pal)
g.map(sns.kdeplot, "duration", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2, clip=(0, 0.2))
g.map(sns.kdeplot, "duration", clip_on=False, color="w", lw=2, bw=.2, clip=(0, 0.2))
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Set the subplots to overlap
#g.fig.subplots_adjust(hspace=-.25)
g.despine(bottom=True, left=True)
ax.set(xlabel='', ylabel='Interruption Duration (s)')
ax.set(ylim=(0, None))
plt.savefig("vis-dist-duration." + FORMAT)
def visualize_boxplot_initial_buffering(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = []
for x in infos:
data[key].append(x.initial_buffering)
data[key] = z_filter(data[key])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
    df = pandas.DataFrame.from_dict(data)
"""
Functions for radiosonde related calculations.
"""
import warnings
import numpy as np
import pandas as pd
import xarray as xr
from act.utils.data_utils import convert_to_potential_temp
try:
from pkg_resources import DistributionNotFound
import metpy.calc as mpcalc
METPY_AVAILABLE = True
except ImportError:
METPY_AVAILABLE = False
except (ModuleNotFoundError, DistributionNotFound):
warnings.warn("MetPy is installed but could not be imported. " +
"Please check your MetPy installation. Some features " +
"will be disabled.", ImportWarning)
METPY_AVAILABLE = False
if METPY_AVAILABLE:
from metpy.units import units
def calculate_precipitable_water(ds, temp_name='tdry', rh_name='rh',
pres_name='pres'):
"""
Function to calculate precipitable water vapor from ARM sondewnpn b1 data.
Will first calculate saturation vapor pressure of all data using Arden-Buck
equations, then calculate specific humidity and integrate over all pressure
levels to give us a precipitable water value in centimeters.
    Parameters
    ----------
    ds : ACT object
Object as read in by the ACT netCDF reader.
temp_name : str
Name of temperature field to use. Defaults to 'tdry' for sondewnpn b1
level data.
rh_name : str
Name of relative humidity field to use. Defaults to 'rh' for sondewnpn
b1 level data.
pres_name : str
Name of atmospheric pressure field to use. Defaults to 'pres' for
sondewnpn b1 level data.
"""
temp = ds[temp_name].values
rh = ds[rh_name].values
pres = ds[pres_name].values
# Get list of temperature values for saturation vapor pressure calc
temperature = []
for t in np.nditer(temp):
temperature.append(t)
# Apply Arden-Buck equation to get saturation vapor pressure
sat_vap_pres = []
for t in temperature:
# Over liquid water, above freezing
if t >= 0:
sat_vap_pres.append(0.61121 * np.exp((18.678 - (t / 234.5)) *
(t / (257.14 + t))))
# Over ice, below freezing
else:
sat_vap_pres.append(0.61115 * np.exp((23.036 - (t / 333.7)) *
(t / (279.82 + t))))
# convert rh from % to decimal
rel_hum = []
for r in np.nditer(rh):
rel_hum.append(r / 100.)
# get vapor pressure from rh and saturation vapor pressure
vap_pres = []
for i in range(0, len(sat_vap_pres)):
es = rel_hum[i] * sat_vap_pres[i]
vap_pres.append(es)
# Get list of pressure values for mixing ratio calc
pressure = []
for p in np.nditer(pres):
pressure.append(p)
# Mixing ratio calc
mix_rat = []
for i in range(0, len(vap_pres)):
mix_rat.append(0.622 * vap_pres[i] / (pressure[i] - vap_pres[i]))
# Specific humidity
spec_hum = []
for rat in mix_rat:
spec_hum.append(rat / (1 + rat))
# Integrate specific humidity
pwv = 0.0
for i in range(1, len(pressure) - 1):
pwv = pwv + 0.5 * (spec_hum[i] + spec_hum[i - 1]) * (pressure[i - 1] -
pressure[i])
pwv = pwv / 0.098
return pwv
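# Hedged usage sketch: assumes an ARM sondewnpn b1 file read into an ACT/xarray
# dataset with the default variable names ('tdry', 'rh', 'pres'); the exact reader
# function depends on the installed ACT version.
#
#   import act
#   ds = act.io.armfiles.read_netcdf("sgpsondewnpnC1.b1.20190101.053200.cdf")
#   pwv_cm = calculate_precipitable_water(ds)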
def calculate_stability_indicies(ds, temp_name="temperature",
td_name="dewpoint_temperature",
p_name="pressure",
rh_name='relative_humidity',
moving_ave_window=0):
"""
Function for calculating stability indices from sounding data.
Parameters
----------
ds : ACT dataset
        The dataset to compute the stability indices of. Must have
temperature, dewpoint, and pressure in vertical coordinates.
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
rh_name : str
The name of the relative humidity field.
moving_ave_window : int
Number of points to do a moving average on sounding data to reduce
noise. This is useful if noise in the sounding is preventing parcel
ascent.
Returns
-------
ds : ACT dataset
        An ACT dataset with additional stability indices added.
"""
if not METPY_AVAILABLE:
raise ImportError("MetPy need to be installed on your system to " +
"calculate stability indices")
t = ds[temp_name]
td = ds[td_name]
p = ds[p_name]
rh = ds[rh_name]
if not hasattr(t, "units"):
raise AttributeError("Temperature field must have units" +
" for ACT to discern!")
if not hasattr(td, "units"):
raise AttributeError("Dewpoint field must have units" +
" for ACT to discern!")
if not hasattr(p, "units"):
raise AttributeError("Pressure field must have units" +
" for ACT to discern!")
if t.units == "C":
t_units = units.degC
else:
t_units = getattr(units, t.units)
if td.units == "C":
td_units = units.degC
else:
td_units = getattr(units, td.units)
p_units = getattr(units, p.units)
rh_units = getattr(units, rh.units)
# Sort all values by decreasing pressure
t_sorted = np.array(t.values)
td_sorted = np.array(td.values)
p_sorted = np.array(p.values)
rh_sorted = np.array(rh.values)
ind_sort = np.argsort(p_sorted)
t_sorted = t_sorted[ind_sort[-1:0:-1]]
td_sorted = td_sorted[ind_sort[-1:0:-1]]
p_sorted = p_sorted[ind_sort[-1:0:-1]]
rh_sorted = rh_sorted[ind_sort[-1:0:-1]]
if moving_ave_window > 0:
t_sorted = np.convolve(
t_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
td_sorted = np.convolve(
td_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
p_sorted = np.convolve(
p_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
rh_sorted = np.convolve(
rh_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
t_sorted = t_sorted * t_units
td_sorted = td_sorted * td_units
p_sorted = p_sorted * p_units
rh_sorted = rh_sorted * rh_units
# Calculate mixing ratio
mr = mpcalc.mixing_ratio_from_relative_humidity(
p_sorted, t_sorted, rh_sorted)
# Discussion of issue #361 use virtual temperature.
vt = mpcalc.virtual_temperature(t_sorted, mr)
t_profile = mpcalc.parcel_profile(
p_sorted, t_sorted[0], td_sorted[0])
# Calculate parcel trajectory
ds["parcel_temperature"] = t_profile.magnitude
ds["parcel_temperature"].attrs['units'] = t_profile.units
# Calculate CAPE, CIN, LCL
sbcape, sbcin = mpcalc.surface_based_cape_cin(
p_sorted, vt, td_sorted)
lcl = mpcalc.lcl(
p_sorted[0], t_sorted[0], td_sorted[0])
try:
lfc = mpcalc.lfc(
p_sorted[0], t_sorted[0], td_sorted[0])
except IndexError:
lfc = np.nan * p_sorted.units
mucape, mucin = mpcalc.most_unstable_cape_cin(
p_sorted, vt, td_sorted)
where_500 = np.argmin(np.abs(p_sorted - 500 * units.hPa))
li = t_sorted[where_500] - t_profile[where_500]
ds["surface_based_cape"] = sbcape.magnitude
ds["surface_based_cape"].attrs['units'] = "J/kg"
ds["surface_based_cape"].attrs['long_name'] = "Surface-based CAPE"
ds["surface_based_cin"] = sbcin.magnitude
ds["surface_based_cin"].attrs['units'] = "J/kg"
ds["surface_based_cin"].attrs['long_name'] = "Surface-based CIN"
ds["most_unstable_cape"] = mucape.magnitude
ds["most_unstable_cape"].attrs['units'] = "J/kg"
ds["most_unstable_cape"].attrs['long_name'] = "Most unstable CAPE"
ds["most_unstable_cin"] = mucin.magnitude
ds["most_unstable_cin"].attrs['units'] = "J/kg"
ds["most_unstable_cin"].attrs['long_name'] = "Most unstable CIN"
ds["lifted_index"] = li.magnitude
ds["lifted_index"].attrs['units'] = t_profile.units
ds["lifted_index"].attrs['long_name'] = "Lifted index"
ds["level_of_free_convection"] = lfc.magnitude
ds["level_of_free_convection"].attrs['units'] = lfc.units
ds["level_of_free_convection"].attrs['long_name'] = "Level of free convection"
ds["lifted_condensation_level_temperature"] = lcl[1].magnitude
ds["lifted_condensation_level_temperature"].attrs['units'] = lcl[1].units
ds["lifted_condensation_level_temperature"].attrs[
'long_name'] = "Lifted condensation level temperature"
ds["lifted_condensation_level_pressure"] = lcl[0].magnitude
ds["lifted_condensation_level_pressure"].attrs['units'] = lcl[0].units
ds["lifted_condensation_level_pressure"].attrs[
'long_name'] = "Lifted condensation level pressure"
return ds
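# Minimal usage sketch for calculate_stability_indicies (illustrative only; the
# dataset and variable names below are assumptions, not taken from this module):
#
#   sonde_ds = ...  # xarray Dataset read from a sondewnpn b1 file
#   sonde_ds = calculate_stability_indicies(
#       sonde_ds, temp_name='tdry', td_name='dp', p_name='pres', rh_name='rh')
#   print(float(sonde_ds['surface_based_cape']))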
def calculate_pbl_liu_liang(ds, temperature='tdry', pressure='pres', windspeed='wspd', height='alt',
smooth_height=3, land_parameter=True, llj_max_alt=1500., llj_max_wspd=2.):
"""
    Function for calculating the PBL height from a radiosonde profile
    using the Liu-Liang 2010 technique. There are some slight discrepancies
    between this function and the ARM implementation: 1) it imposes a 1500 m (keyword)
    cap on the altitude used in the LLJ definition, and 2) the interpolation is
    slightly different because it uses Python functions.
Parameters
----------
ds : xarray Dataset
Dataset housing radiosonde profile for calculations
temperature : str
The name of the temperature field.
pressure : str
The name of the pressure field.
windspeed : str
The name of the wind speed field.
height : str
The name of the height field
smooth_height : int
Number of points to do a moving average on sounding height data to reduce noise
land_parameter : boolean
        Set to True for retrievals over land or False for retrievals over water.
llj_max_alt : float
Maximum altitude the LLJ 2 m/s difference should be checked against
llj_max_wspd : float
Maximum wind speed threshold to use to define LLJ
Returns
-------
obj : xarray Dataset
xarray dataset with results stored in pblht_liu_liang variable
References
----------
Liu, Shuyan, and <NAME>. "Observed diurnal cycle climatology of planetary
boundary layer height." Journal of Climate 23, no. 21 (2010): 5790-5809.
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Planetary boundary layer (PBL) height value added product (VAP): Radiosonde retrievals."
Department of Energy Office of Science Atmospheric Radiation Measurement (ARM) Program
(United States) (2013).
"""
time_0 = ds['time'].values
temp_0 = ds[temperature].values
ds[pressure] = ds[pressure].rolling(time=smooth_height, min_periods=1, center=True).mean()
obj = ds.swap_dims(dims_dict={'time': pressure})
for var in obj:
obj[var].attrs = ds[var].attrs
base = 5 # 5 mb base
starting_pres = base * np.ceil(float(obj[pressure].values[2]) / base)
p_grid = np.flip(np.arange(100., starting_pres + base, base))
try:
obj = obj.sel(pres=p_grid, method='nearest')
except Exception:
ds[pressure] = ds[pressure].rolling(time=smooth_height + 4, min_periods=2, center=True).mean()
obj = ds.swap_dims(dims_dict={'time': pressure})
for var in obj:
obj[var].attrs = ds[var].attrs
try:
obj = obj.sel(pres=p_grid, method='nearest')
except Exception:
raise ValueError('Sonde profile does not have unique pressures after smoothing')
# Get Data Variables
if smooth_height > 0:
alt = | pd.Series(obj[height].values) | pandas.Series |
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
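# LenAnno('X').calc(df) fills a single 'X' column with 'X<len(df)>' in every row,
# e.g. 'X3' for a three-row frame, so tests can see how many rows the annotator saw.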
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_mangle(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert (a.non_annotator_columns == ["A", "B"]).all()
def mangle(df):
df = df.drop("A", axis=1)
df = df[df.B == "c"]
return df
fn = a.write("test.csv", mangle)[1]
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), mangle(test_df))
def test_magic(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
a = DelayedDataFrame("shu", lambda: test_df)
assert hash(a)
assert a.name in str(a)
assert a.name in repr(a)
def test_annotator(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("column", "value")
a.annotate()
assert "column" in a.df.columns
assert (a.df["column"] == "value").all()
def test_add_non_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(TypeError):
a += 5
def test_annotator_wrong_columns(self):
class WrongConstant(Annotator):
def __init__(self, column_name, value):
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({"shu": self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(ValueError):
a += WrongConstant("column", "value")
def test_annotator_minimum_columns(self):
a = DelayedDataFrame(
"shu", lambda: | pd.DataFrame({"A": [1, 2], "B": ["c", "d"]}) | pandas.DataFrame |
from email import header
import select
from bs4 import BeautifulSoup
from selenium import webdriver
import pandas as pd
from selenium.common.exceptions import NoSuchElementException
import pickle
from connect_to_db import DatabaseConnection
from selenium.webdriver.support.ui import Select
# from cleaning_pickle import update_tables
def is_num_or_float(value):
try:
float(value)
return True
except ValueError:
return False
class Scraper:
def __init__(self) -> None:
self.status = 0
self.scrape_url = None
def set_url(self,url: str,name: str):
self.scrape_url = url
self.name = name
print("URL Changed to:",self.scrape_url," Status:",self.status)
def get_url(self) -> str:
return self.scrape_url
def get_status(self) -> int:
return self.status
def create_driver(self):
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.set_headless()
self.driver = webdriver.Firefox(firefox_options=fireFoxOptions)
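        # Note: set_headless() and the firefox_options keyword are the Selenium 3 API;
        # Selenium 4 replaces them with options.add_argument("--headless") and
        # webdriver.Firefox(options=fireFoxOptions).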
def close_driver(self):
self.driver.quit()
def parse_url(self) -> None:
# html = driver.page_source
# self.source = urllib.request.urlopen(self.scrape_url)
# r = requests.get(self.scrape_url)
# print(r.text)
self.driver.get(self.scrape_url)
self.soup = BeautifulSoup(self.driver.page_source,"html.parser")
        # print(self.soup)
def parse_html(self) -> None:
self.rows=list()
self.header = None
# html_table = self.soup.find("table",{"class":"table graph-table W(100%) Ta(start) Bdcl(c) Mb(56px) Ov(h)"})
html_table = self.driver.find_element_by_xpath("//table[@class='table graph-table W(100%) Ta(start) Bdcl(c) Mb(56px) Ov(h)']")
is_header = True
for row in html_table.find_elements_by_tag_name("tr"):
if is_header:
self.header = row
is_header = False
else:
self.rows.append(row)
# for row in html_table.findAll("tr"):
# if is_header:
# self.header = row
# is_header = False
# else:
# self.rows.append(row)
self.status += 1
def parse_stats(self, data: 'PlayerStatistics', append=False):
headers_list = list()
stats_list = list()
for col in self.header.find_elements_by_tag_name("th"):
headers_list.append(col.get_attribute('title'))
for row in self.rows:
ind_stats = list()
for col in row.find_elements_by_tag_name("th"):
ind_stats.append(col.text)
for col in row.find_elements_by_tag_name("td"):
if is_num_or_float(col.text):
ind_stats.append(float(col.text))
else:
if(col.text == '-'):
ind_stats.append(0.0)
else:
ind_stats.append(col.text)
#Add row
stats_list.append(ind_stats)
# for col in self.header.findAll("th"):
# headers_list.append(col["title"])
# for row in self.rows:
# ind_stats = list()
# for col in row.findAll("th"):
# ind_stats.append(col.text)
# for col in row.findAll("td"):
# if is_num_or_float(col.text):
# ind_stats.append(float(col.text))
# else:
# if(col.text == '-'):
# ind_stats.append(0.0)
# else:
# ind_stats.append(col.text)
# #Add row
# stats_list.append(ind_stats)
# fill dataframe
df_stats = pd.DataFrame(stats_list, columns=headers_list)
if append == False:
data.add_table(self.name,df_stats)
else:
data.tables[self.name] = | pd.concat([data.tables[self.name], df_stats], ignore_index=True) | pandas.concat |
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
        - dummy transform and reverse_transform functions + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._colums == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_column`` is the column name plus a ``#`` token (``'a#'``)
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
        by a token if both columns are in that set.
        Input:
        - Table with two columns.
        Side Effect:
        - ``_diff_column`` is the two column names joined by a ``#`` token (``'a#b'``)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
        of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
        which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
| pd.testing.assert_frame_equal(expected_out, out) | pandas.testing.assert_frame_equal |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = | pd.date_range('2010-01-01', periods=3) | pandas.date_range |
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
import pickle
import os
import os.path
import datetime
import time
def promt_time_stamp():
return str(datetime.datetime.fromtimestamp(time.time()).strftime('[%H:%M:%S] '))
def get_index_tickers(list_indexes=list(), load_all=False):
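    """Collect ticker symbols for the requested stock indexes.

    Cached tickers are read from pickles under 'indexes/' when present;
    otherwise the symbols are scraped from the corresponding Wikipedia pages
    with pd.read_html. Set load_all=True to process every supported index.
    """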
tickers_all = []
path = 'indexes/'
if not os.path.exists(path):
os.mkdir(path)
if load_all:
list_indexes = ['dowjones', 'sp500', 'dax', 'sptsxc', 'bovespa', 'ftse100', 'cac40', 'ibex35',
'eustoxx50', 'sensex', 'smi', 'straitstimes', 'rts', 'nikkei', 'ssec', 'hangseng',
'spasx200', 'mdax', 'sdax', 'tecdax']
for index in list_indexes:
tickers = []
implemented = True
if os.path.isfile(path + index + '.pic'):
print(promt_time_stamp() + 'load ' + index + ' tickers from db ..')
with open(path + index + '.pic', "rb") as input_file:
for ticker in pickle.load(input_file):
tickers.append(ticker)
elif index == 'dowjones':
print(promt_time_stamp() + 'load dowjones tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average')
for ticker in r[1][2][1:].tolist():
tickers.append(ticker)
elif index == 'sp500':
print(promt_time_stamp() + 'load sp500 tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
for ticker in r[0][0][1:].tolist():
tickers.append(ticker)
elif index == 'dax':
print(promt_time_stamp() + 'load dax tickers ..')
r = pd.read_html('https://it.wikipedia.org/wiki/DAX_30')[1]
for ticker in pd.DataFrame(r)[1][1:].tolist():
tickers.append(ticker)
elif index == 'sptsxc':
print(promt_time_stamp() + 'load sptsxc tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/S%26P/TSX_Composite_Index')
for ticker in r[0][0][1:].tolist():
tickers.append(ticker)
elif index == 'bovespa':
print(promt_time_stamp() + 'load bovespa tickers ..')
r = pd.read_html('https://id.wikipedia.org/wiki/Indeks_Bovespa')
for ticker in r[0][1][1:].tolist():
tickers.append(ticker)
elif index == 'ftse100':
print(promt_time_stamp() + 'load ftse100 tickers ..')
r = | pd.read_html('https://en.wikipedia.org/wiki/FTSE_100_Index') | pandas.read_html |
import datetime
import fileinput
import glob
import gzip
import multiprocessing
import os
import random # for log file names
import re
import shutil
import subprocess
import sys
import time
import urllib as ul  # for removing url style encoding from gff text notes
import urllib.parse  # explicitly load the parse submodule so ul.parse.unquote is always available
from pathlib import Path
import configargparse
import pandas as pd
import pandasql as ps
import pysam # sequence format specific module fastq/bam/sam...
import gffpandas.gffpandas as gffpd # annotation format specific module gff3
from fuzzysearch import find_near_matches
pimms_mssg = """
===========================================================================================================
Pragmatic Insertional Mutation Mapping system (PIMMS) mapping pipeline v2
===========================================================================================================
o o
o o
// //
// //
|_||_| |_||_| @@@@@ @@@@@@ @@ @@ @@ @@ @@@@@@ @@@@@@
|@||@| |@||@| @@ @@ @@ @@@@ @@@@ @@@@ @@@@ @@ @@ @@
|@||@| |@||@| @@@@@ @@ @@ @@@ @@ @@ @@@ @@ @@@ @@
|@||@| |@||@| @@ @@ @@ @ @@ @@ @ @@ @@@ @@
|@@@@| |@@@@| @@ @@ @@ @@ @@ @@ @@ @@
|@@@@| |@@@@| @@ @@@@@@ @@ @@ @@ @@ @@@@@@@ @@@@@@@@
===========================================================================================================
PIMMS2 """
pimms_mssg2 = """ mode
===========================================================================================================
"""
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
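# Note (added comment): Range defines __eq__ so an instance can be used with
# argparse/configargparse 'choices' to accept any float inside an interval,
# e.g. choices=[Range(0.0, 1.0)] (illustrative usage, not taken from this script).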
def create_folder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
        print('Error creating directory: ' + directory)
def make_results_dirs_in_sam_dir(samfile_path, run_label):
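    """Create '<run_label>_out_dashboard' and '<run_label>_out_info' result
    directories alongside the sam/bam file, falling back to time-stamped names
    if they already exist; returns (sam_dir, dashboard_dir, info_dir)."""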
samdirname = os.path.dirname(samfile_path)
results_dir_db = os.path.join(samdirname, run_label + '_out_dashboard')
results_dir_info = os.path.join(samdirname, run_label + '_out_info')
# results_dir_kept = os.path.join(samdirname, run_label + '_out_kept')
if not os.path.exists(results_dir_db):
try:
os.makedirs(results_dir_db)
os.makedirs(results_dir_info)
except OSError:
            print(f"Error while creating result dirs in {samdirname}")
else:
results_dir_db = os.path.join(samdirname, run_label + time.strftime("_%d%m%y_%H%M%S") + '_results_dashboard')
results_dir_info = os.path.join(samdirname, run_label + time.strftime("_%d%m%y_%H%M%S") + '_results_info')
try:
os.makedirs(results_dir_db)
os.makedirs(results_dir_info)
except OSError:
            print(f"Error while creating incremented result dirs in {samdirname}")
return samdirname, results_dir_db, results_dir_info
def delete_file_list(file_list):
for file_path in file_list:
try:
os.remove(file_path)
except OSError:
            print(f"Error while deleting file {file_path}")
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
"""
if not os.path.exists(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise configargparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def prog_in_path_check(prog_to_check):
if shutil.which(prog_to_check):
print('required mapper is in the path : ' + prog_to_check)
else:
sys.exit('\nERROR: ' + prog_to_check +
' cannot be found in the path. \nSYS.EXIT: Please check your environment and ensure ' + prog_to_check +
' is installed and available before trying again.\n\n')
def concat_fastq_raw(flanking_fastq_list, label, fq_file_suffix, concat_out_dir):
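    """Concatenate the per-file flanking-read fastqs into a single gzipped file
    named '<label>_RX_concat<suffix>.gz' in concat_out_dir and return its path;
    the intermediate fastq files are removed unless --keep was given."""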
concat_fastq_result_filename = os.path.join(concat_out_dir, label + '_RX_concat' + fq_file_suffix + '.gz')
print(concat_fastq_result_filename)
print(" ".join(flanking_fastq_list))
with gzip.open(concat_fastq_result_filename, "wt", compresslevel=6) as big_file:
with fileinput.input(files=flanking_fastq_list) as inputs:
for line in inputs:
big_file.write(line)
if not parsed_args[0].keep:
print('Removing intermediate fastq flanking reads files')
# print(flanking_fastq_list)
delete_file_list(flanking_fastq_list)
return concat_fastq_result_filename
############################
# FIND_FLANK FUNCTIONS:
############################
def find_read_files_with_glob(indir, wildcards):
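    """Return the first non-empty glob match of indir plus one of the suffix
    wildcards; exit with an error message if no read files are found."""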
for suffix_wc in wildcards:
read_files = glob.glob(indir + suffix_wc)
if len(read_files):
return read_files
    sys.exit("SYS EXIT: unable to find read files, check file suffixes match permissible: " + str(wildcards) + '\n')
def merge_logs(log_path):
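    """Merge the per-process 'log_*txt' tab-separated logs in log_path into one
    dataframe, append a COMBINED totals row, write 'result_summary.txt' one
    directory up and return the merged dataframe."""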
log_files = glob.glob(os.path.join(log_path, "log_*txt"))
df_from_each_log = (pd.read_table(f) for f in log_files)
merged_logs_df = pd.concat(df_from_each_log, ignore_index=True)
merged_logs_df = merged_logs_df.sort_values(by=['fq_filename'])
log_sums = merged_logs_df.sum(numeric_only=True)
log_sums['fq_filename'] = 'COMBINED'
merged_logs_df = merged_logs_df.append(log_sums, ignore_index=True)
merged_logs_df.to_csv(os.path.join(log_path, "..", 'result_summary.txt'), sep='\t', index=False)
print(merged_logs_df.to_string(index=False))
return merged_logs_df
def run_minimap2(flanking_fastq_concat_result, sam_output_result, genome_fasta):
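    """Map the concatenated flanking reads against genome_fasta with minimap2
    ('map-ont' preset for nanopore data, 'sr' otherwise), keeping primary,
    hit-only alignments and writing them to sam_output_result."""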
stream = os.popen('minimap2 --version')
output = stream.read()
print('calling minimap version: ' + output)
# process = subprocess.Popen(['minimap2', '--version'],
print(' '.join(['minimap2', '-x', 'sr', '-a',
'-y', # -y adds fastq comment to sam?
'-o', sam_output_result, genome_fasta, flanking_fastq_concat_result,
'--secondary=no', '--sam-hit-only']))
if parsed_args[0].nano:
mm_mode = 'map-ont'
else:
mm_mode = 'sr'
process = subprocess.Popen(
['minimap2', '-x', mm_mode,
'-a',
'-y', # -y adds fastq comment to sam
'-o', sam_output_result,
genome_fasta,
flanking_fastq_concat_result,
'--secondary=no', '--sam-hit-only'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
def run_bwa(flanking_fastq_concat_result, sam_output_result, genome_fasta, ncpus):
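    """Map the concatenated flanking reads with 'bwa mem' using ncpus threads,
    creating (or reusing) a BWA index in '<genome stem>_index' and writing the
    alignments to sam_output_result."""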
bwa_index_dir = Path(genome_fasta).stem + '_index'
if not os.path.exists(os.path.join(bwa_index_dir, Path(genome_fasta).name + '.sa')):
print('Creating BWA index...')
create_folder(bwa_index_dir)
fasta_to_index = os.path.join(bwa_index_dir, Path(genome_fasta).name)
shutil.copyfile(genome_fasta, fasta_to_index)
process = subprocess.Popen(
['bwa', 'index',
fasta_to_index],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
else:
print('Using existing BWA index...')
print(' '.join(['bwa', 'mem', genome_fasta, flanking_fastq_concat_result, sam_output_result]))
# with open(sam_output_result, 'w') as f:
process = subprocess.Popen(
['bwa', 'mem',
'-t', str(ncpus), "-C",
'-o', sam_output_result,
os.path.join(bwa_index_dir, Path(genome_fasta).name),
flanking_fastq_concat_result],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
def py_sam_to_bam(sam_output_result):
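    """Sort and index the SAM output into a BAM file with pysam, print brief
    flagstat mapping stats, remove the SAM file unless --keep was given and
    return the BAM path."""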
bam_output_result = re.sub('.sam', '.bam', sam_output_result)
# add?? line to remove unmapped readsbased on: | samtools view -F 4 -o onlyMapped.bam ??
# noinspection PyUnresolvedReferences
pysam.sort('-O' 'BAM', "-o", bam_output_result, sam_output_result)
# noinspection PyUnresolvedReferences
pysam.index(bam_output_result)
print('\nMapping stats (flagstat):\n')
# noinspection PyUnresolvedReferences
for fsline in pysam.flagstat(bam_output_result).splitlines()[:5]:
print(fsline)
print('\n\n')
# if parsed_args[0].rmfiles:
if not parsed_args[0].keep:
delete_file_list([sam_output_result])
return bam_output_result
def pimms_fastq(fq_filename, fqout_filename, out_dir_logs, nano):
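    """Scan one fastq file for the transposon motifs with fuzzy matching
    (find_near_matches), extract the genomic sequence flanking the motif
    match(es), write the extracted reads to fqout_filename and append a
    one-line per-file summary to a log file in out_dir_logs; 'nano' switches
    between the nanopore (Levenshtein-only) and Illumina matching parameters."""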
trans = str.maketrans('ATGCN', 'TACGN') # complement DNA lookup
qry1 = parsed_args[0].motif1[0].strip("'\"")
qry2 = parsed_args[0].motif2[0].strip("'\"")
# print(str(qry1))
# print(str(qry2))
# revcomp using maketrans lookup and a string reverse
qry1rc = qry1.translate(trans)[::-1] # reverse complement transposon motif1 ([::-1] -> reverse)
qry2rc = qry2.translate(trans)[::-1] # reverse complement transposon motif2
# print(str(qry1rc))
# print(str(qry2rc))
# if parsed_args[0].nano: # nano == True
if nano: # nano == True
# parsed_args[0].noreps = True
# print('nano == True\n')
fuzzy_levenshtein = True
l_dist = parsed_args[0].lev[0] # maximum Levenshtein Distance
# min_length = 50
# max_length = 200
min_length = parsed_args[0].min[0]
max_length = parsed_args[0].max[0]
qual_char = parsed_args[0].qual_char
print('Nanopore appropriate settings: Levenshtein distance of ' + str(l_dist)
+ ' + sequence length min = ' + str(min_length) + ', max = ' + str(max_length))
else:
# print('nano == False\n')
# fuzzy_levenshtein = False
subs = parsed_args[0].sub[0]
        l_dist = parsed_args[0].lev[0]  # maximum Levenshtein Distance
fuzzy_levenshtein = bool(l_dist)
insrt = parsed_args[0].insert[0]
dels = parsed_args[0].deletion[0]
min_length = parsed_args[0].min[0]
max_length = parsed_args[0].max[0]
# print('standard settings\n')
        print('Illumina settings: Levenshtein distance of ' + str(l_dist)
+ ' + sequence length min = ' + str(min_length) + ', max = ' + str(max_length))
count = 0
countq1 = 0
countq2 = 0
countq1q2 = 0
countq1rc = 0
countq2rc = 0
# countq1rcq2rc = 0
hit_but_short_q1_q2 = 0
hit_q1_q2 = 0
countq2rcq1rc = 0
hit_but_short_q2rc_q1rc = 0
hit_q2rc_q1rc = 0
wrongq2q1 = 0
wrongq1rcq2rc = 0
countqqrc = 0
countqmulti = 0
hit_but_short_q1_only = 0
hit_q1_only = 0
hit_but_short_q1rc_only = 0
hit_q1rc_only = 0
hit_but_short_q2_only = 0
hit_q2_only = 0
hit_but_short_q2rc_only = 0
hit_q2rc_only = 0
# is_contam = 0
# reject_reads_list = []
# reject_reads_dict = dict()
    # To resolve/reharmonise: different processing code for nanopore and Illumina input files
if nano:
with pysam.FastxFile(fq_filename, persist=False) as fin, open(fqout_filename, mode='wt') as fout:
print(fq_filename, ' ==>\n\t##\t##\t', fqout_filename, '\n')
for entry in fin:
count += 1
# print(str(count) + '\n')
if not fuzzy_levenshtein:
# print('find_near_matches \n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2 = find_near_matches(qry2, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
else:
# print('find_near_matches lev\n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_l_dist=l_dist)
matchesq2 = find_near_matches(qry2, entry.sequence, max_l_dist=l_dist)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_l_dist=l_dist)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_l_dist=l_dist)
if not bool(matchesq1 + matchesq2 + matchesq1rc + matchesq2rc):
# print(matchesq1 + matchesq2 + matchesq1rc + matchesq1rc)
# reject_reads_dict.update({entry.name: 'nomatch'})
continue
# skip fastq entry if multiple matches to same motif query seq
if len(matchesq1) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq1rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
# skip fastq entry if multiple matches to same motif query direct and reverse complement
if (len(matchesq1) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# or matches to two incompatible motifs
if (len(matchesq1) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# process motif match pairs to extract target sequences
if (len(matchesq1) == 1) and (len(matchesq2) == 1):
countq1q2 += 1
captured_seqstring = str(entry.sequence)[matchesq1[0].end:matchesq2[0].start]
captured_qualstring = str(entry.quality)[matchesq1[0].end:matchesq2[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if matchesq2[0].start <= matchesq1[0].end:
wrongq2q1 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'ooorder'})
continue
if len(captured_seqstring) >= min_length:
hit_q1_q2 += 1
# print('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_q2 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# break
if (len(matchesq1rc) == 1) and (len(matchesq2rc) == 1):
countq2rcq1rc += 1
captured_seqstring = str(entry.sequence)[matchesq2rc[0].end:matchesq1rc[0].start]
captured_qualstring = str(entry.quality)[matchesq2rc[0].end:matchesq1rc[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if matchesq1rc[0].start <= matchesq2rc[0].end:
wrongq1rcq2rc += 1
# reject_reads_dict.update({entry.name: 'ooorder'})
if len(captured_seqstring) >= min_length:
hit_q2rc_q1rc += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_q1rc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# process single motif matches to extract target sequences
if len(matchesq1) == 1:
countq1 += 1
captured_seqstring = str(entry.sequence)[
matchesq1[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq1[0].end:]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q1_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2rc) == 1:
countq2rc += 1
captured_seqstring = str(entry.sequence)[
matchesq2rc[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq2rc[0].end:]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q2rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq1rc) == 1:
countq1rc += 1
captured_seqstring = str(entry.sequence)[
0:matchesq1rc[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq1rc[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q1rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q1rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2) == 1:
countq2 += 1
captured_seqstring = str(entry.sequence)[
0:matchesq2[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq2[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q2_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q2_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
else:
with pysam.FastxFile(fq_filename, persist=False) as fin, open(fqout_filename, mode='wt') as fout:
print(fq_filename, ' ==>\n\t\t\t', fqout_filename, '\n')
for entry in fin:
count += 1
if not fuzzy_levenshtein:
# print('find_near_matches \n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2 = find_near_matches(qry2, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
else:
# print('find_near_matches lev\n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_l_dist=l_dist)
matchesq2 = find_near_matches(qry2, entry.sequence, max_l_dist=l_dist)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_l_dist=l_dist)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_l_dist=l_dist)
if not bool(matchesq1 + matchesq2 + matchesq1rc + matchesq2rc):
# print(matchesq1 + matchesq2 + matchesq1rc + matchesq1rc)
# reject_reads_dict.update({entry.name: 'nomatch'})
continue
# skip fastq entry if multiple matches to same motif query seq
if len(matchesq1) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq1rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
# skip fastq entry if multiple matches to same motif query direct and reverse complement
if (len(matchesq1) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# or matches to two incompatible motifs
if (len(matchesq1) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# process motif match pairs to extract target sequences
if (len(matchesq1) == 1) and (len(matchesq2) == 1):
countq1q2 += 1
captured_seqstring = str(entry.sequence)[matchesq1[0].end:matchesq2[0].start]
captured_qualstring = str(entry.quality)[matchesq1[0].end:matchesq2[0].start]
if matchesq2[0].start <= matchesq1[0].end:
wrongq2q1 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'ooorder'})
continue
if len(captured_seqstring) >= min_length:
hit_q1_q2 += 1
# print('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_q2 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# break
if (len(matchesq1rc) == 1) and (len(matchesq2rc) == 1):
countq2rcq1rc += 1
captured_seqstring = str(entry.sequence)[matchesq2rc[0].end:matchesq1rc[0].start]
captured_qualstring = str(entry.quality)[matchesq2rc[0].end:matchesq1rc[0].start]
if matchesq1rc[0].start <= matchesq2rc[0].end:
wrongq1rcq2rc += 1
# reject_reads_dict.update({entry.name: 'ooorder'})
if len(captured_seqstring) >= min_length:
hit_q2rc_q1rc += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_q1rc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# process single motif matches to extract target sequences
if len(matchesq1) == 1:
countq1 += 1
captured_seqstring = str(entry.sequence)[
matchesq1[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq1[0].end:]
if len(captured_seqstring) >= min_length:
hit_q1_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2rc) == 1:
countq2rc += 1
captured_seqstring = str(entry.sequence)[
matchesq2rc[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq2rc[0].end:]
if len(captured_seqstring) >= min_length:
hit_q2rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq1rc) == 1:
countq1rc += 1
captured_seqstring = str(entry.sequence)[
0:matchesq1rc[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq1rc[0].start]
if len(captured_seqstring) >= min_length:
hit_q1rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q1rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2) == 1:
countq2 += 1
captured_seqstring = str(entry.sequence)[
0:matchesq2[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq2[0].start]
if len(captured_seqstring) >= min_length:
hit_q2_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q2_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# print("\n" + fq_filename + " ->>\n" + fqout_filename + "#####################@@@@@@@@@@@@@@@@@@@@\n")
# print('#######################################################~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~log1\n')
    # very cryptic logging; needs reorganising and fixing to work cleanly with multiprocessing
    # note: a random int is appended to make log file names (almost) unique; a more robust fix is needed
log_file = open(
f'{out_dir_logs}/log_{os.getppid()}_{multiprocessing.current_process().pid}_{random.randint(1000, 9999)}.txt', 'w'
)
try:
# print(fq_filename, fqout_filename, 'logfail2\n')
log_file.write(
            f'fq_filename\tread count:\tmultiple copies of a motif:\tmismatched motifs:\tboth motifs (fwd|revcomp):\t'
            f'both motifs (fwd|revcomp) >= {min_length}:\tsingle motif >= {min_length}:\ttotal passed\n')
log_file.write(f'{os.path.basename(fq_filename)}\t')
log_file.write(f'{count}\t')
log_file.write(f'{countqmulti}\t')
log_file.write(f'{countqqrc}\t')
log_file.write(
f'{hit_q1_q2 + hit_q2rc_q1rc + hit_but_short_q1_q2 + hit_but_short_q2rc_q1rc}\t')
log_file.write(f'{hit_q1_q2 + hit_q2rc_q1rc}\t')
log_file.write(f'{hit_q1_only + hit_q2_only + hit_q1rc_only + hit_q2rc_only}\t')
log_file.write(f'{hit_q1_only + hit_q2_only + hit_q1rc_only + hit_q2rc_only + hit_q1_q2 + hit_q2rc_q1rc}\n')
except Exception as e:
# Write problems to the error file
log_file.write(f'ERROR: {e} problem with motif matching to {fq_filename}!\n')
finally:
# Close the files!
log_file.close()
# def survey_fastq(resultx_reads_list, resultx_reads_dict, fqout):
def survey_fastq(resultx_reads_list, fqout):
with pysam.FastxFile(fqout, persist=False) as fh:
for entry in fh:
resultx_reads_list.append(entry.name)
# resultx_reads_dict[entry.name] = len(str(entry.sequence))
############################
# FIND_FLANK FUNCTIONS end
############################
############################
# SAM_COORDS FUNCTIONS:
############################
def process_gff(gff_file, gff_feat_type, gff_extra, rdir):
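    """Read the GFF3 annotation, keep only the requested feature types, write a
    filtered '*_pimms_features*.gff' to rdir and return (gff_columns_addback,
    attr_to_columns): a compact per-feature table plus the attribute-expanded
    dataframe (both empty if no matching features are found)."""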
annotation = gffpd.read_gff3(gff_file)
annotation = annotation.filter_feature_of_type(gff_feat_type)
gff_stem, gff_ext = os.path.splitext(os.path.basename(gff_file))
if gff_feat_type[0] == "pseudogene":
annotation.to_gff3(os.path.join(rdir, gff_stem + '_pimms_features_pseudogene.gff'))
else:
annotation.to_gff3(os.path.join(rdir, gff_stem + '_pimms_features.gff'))
# break 9th gff column key=value pairs down to make additional columns
attr_to_columns = annotation.attributes_to_columns()
if attr_to_columns.empty:
        # return empty dataframes if no rows of the required feature type are found
return attr_to_columns, attr_to_columns
attr_to_columns = attr_to_columns.assign(
feat_length=(attr_to_columns.end - attr_to_columns.start + 1)).dropna(axis=1,
how='all').drop(columns=['attributes'])
data_top = attr_to_columns.head()
print(data_top)
# remove RFC 3986 % encoding from product (gff3 attribute)
# attr_to_columns = attr_to_columns.assign(product_nopc=attr_to_columns['product'].apply(ul.parse.unquote)).drop(
# columns=['product']).rename(columns={'product_nopc': 'product'})
# attr_to_columns['product'] = attr_to_columns['product'].apply(ul.parse.unquote)
if 'product' not in attr_to_columns:
attr_to_columns['product'] = '-'
else:
attr_to_columns['product'] = attr_to_columns['product'].fillna('').astype(str).apply(ul.parse.unquote) # added fix for None datatype
# fix to skip requested extra gff annotation field if not present in GFF
drop_gff_extra = []
for field in gff_extra:
if field not in attr_to_columns:
print("Warning: Unable to find '" + field + "' in " + str(gff_file) + ' file, continuing...')
drop_gff_extra.append(field)
gff_extra = [item for item in gff_extra if item not in drop_gff_extra]
# Remove URL character encoding from columns (skipping translation if present as this breaks the decoding
for field in gff_extra:
if field == 'translation':
continue
else:
attr_to_columns[field] = attr_to_columns[field].fillna('').astype(str).apply(ul.parse.unquote) # added fix for None datatype
gff_columns_addback = attr_to_columns[['seq_id',
'ID', # additional hopefully unique feature ID
'locus_tag',
'type',
'gene',
'start',
'end',
'feat_length',
'product'] + gff_extra].copy() # add extra fields from gff
# fix to remove na values and allow joining with processed data also processed with fillna to allow group_by usage
# note .copy on previous line
gff_columns_addback.fillna('', inplace=True)
data_top = gff_columns_addback.head()
print(data_top)
data_top2 = attr_to_columns.head()
print(data_top2)
return gff_columns_addback, attr_to_columns
# end process_gff()
def modify_sam_stem(sam_file, min_depth_cutoff, fraction_mismatch):
sam_stem, sam_ext = os.path.splitext(os.path.basename(sam_file))
sam_stem: str = sam_stem + '_md' + str(min_depth_cutoff) + '_mm' + str(fraction_mismatch or '0')
return sam_stem
def process_sam(sam_file, min_depth_cutoff, fraction_mismatch):
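    """Convert the mapped reads in the sam/bam file into a BED file and a
    '*_insert_coords.txt' table of strand-aware insertion coordinates (offset by
    4 bp from the read end), applying the mismatch-fraction filter when given."""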
sam_stem = modify_sam_stem(sam_file, min_depth_cutoff, fraction_mismatch)
samfile = pysam.AlignmentFile(sam_file) # without , "rb" should auto detect sam or bams
open(sam_stem + ".bed", 'w').close()
f = open(sam_stem + ".bed", "a")
strand = ["+", "-"]
for read in samfile.fetch():
if read.is_unmapped:
continue
read_str = strand[int(read.is_reverse)]
read_bed = [read.reference_name, read.pos, read.reference_end, ".", read.mapping_quality, read_str, '# ' + read.query_name] # read group added
f.write('\t'.join([str(i) for i in read_bed]))
f.write('\n')
f.close()
print('#BED')
samfile.close()
samfile = pysam.AlignmentFile(sam_file)
open(sam_stem + "_insert_coords.txt", 'w').close()
f2 = open(sam_stem + "_insert_coords.txt", "a")
f2.write('\t'.join([str(i) for i in ['ref_name', 'coord', 'strand', 'read_name', 'read_grp', 'read_comment']]))
# f2.write('\t'.join([str(i) for i in ['ref_name', 'coord', 'strand', 'read_name', 'read_grp']]))
f2.write('\n')
strand = ["+", "-"]
for read in samfile.fetch():
if read.is_unmapped:
continue
# print(read.query_name + '.')
# continue
nm_value = read.get_tag('NM')
if fraction_mismatch: # and NM_value > 0:
if (read.query_alignment_length * fraction_mismatch[0]) > nm_value:
continue
        read_str = strand[int(read.is_reverse)]  # converts is_reverse boolean into + or - strings
# print(STR)
# continue
if read_str == '+':
read_coords = [read.reference_name, (read.reference_start + 4), read_str, '# ' + read.query_name, ':'.join(read.query_name.split(':', 4)[:4]),
read.get_tag("CO").split(":")[-1]] # add fq comment sample id number
# ':'.join(read.query_name.split(':', 4)[:4])]
f2.write('\t'.join([str(i) for i in read_coords]))
f2.write('\n')
if read_str == '-':
read_coords = [read.reference_name, (read.reference_end - 4), read_str, '# ' + read.query_name, ':'.join(read.query_name.split(':', 4)[:4]),
read.get_tag("CO").split(":")[-1]] # add fq comment sample id number
# ':'.join(read.query_name.split(':', 4)[:4])]
f2.write('\t'.join([str(i) for i in read_coords]))
f2.write('\n')
f2.close()
samfile.close()
print('#COORDS')
# end process_sam()
def seqid_consistancy_check(mygffcolumns, my_sam):
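    """Exit (or only warn when --gff_force is set) if the sequence IDs in the
    GFF do not match the reference names in the sam/bam header."""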
af = pysam.AlignmentFile(my_sam)
sam_seq_id_list = [name['SN'] for name in af.header['SQ']]
gff_seq_id_list = mygffcolumns.seq_id.unique().tolist()
sam_seq_id_list.sort()
gff_seq_id_list.sort()
if sam_seq_id_list == gff_seq_id_list:
print('GFF & mapping reference sequence IDs match')
elif parsed_args[0].gff_force:
print('\nWARNING: GFF & mapping reference sequence IDs are inconsistent. \n' +
'sequence ID mismatch overridden by --gff_force\ngff:\n' +
str(gff_seq_id_list) + '\nsam/bam:\n' + str(sam_seq_id_list) + '\n')
else:
sys.exit(
'\nERROR: GFF & mapping reference sequence IDs are inconsistent. \n' +
'SYS.EXIT: Please check and update the sequence IDs in your sequence and gff files so they match up before running again.\ngff:\n' +
str(gff_seq_id_list) + '\nsam/bam:\n' + str(sam_seq_id_list) + '\n' +
'NOTE: If the sequence ID mismatch is benign e.g. an extra plasmid/contig, override by using --gff_force with bam_extract/full_process\n')
print(type(sam_seq_id_list))
print(type(gff_seq_id_list))
print(sam_seq_id_list)
print(gff_seq_id_list)
def coordinates_to_features_reps(sam_stem, attr_to_columns, condition_label):
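    """Build per-replicate insertion counts: group insertion coordinates by read
    group / sample comment, pivot them into one '<condition_label>_MPn' column
    per mutant pool and sum the counts per GFF feature; returns an empty
    dataframe when replicates cannot be resolved from the read headers."""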
coord_reps_df = pd.read_csv(sam_stem + "_insert_coords.txt", sep='\t', dtype={'ref_name': "str",
'coord': "int64",
'read_comment': "str",
# adding miseq support
'read_grp': "str"})
read_grps = sorted(coord_reps_df.read_grp.unique())
read_comments = sorted(coord_reps_df.read_comment.unique())
if (len(read_comments) > 20):
        print(
            "Warning: Unable to resolve different samples in fastq/sam/bam data (apparently too many?), continuing without replicate insertion counts" +
            "\nNote: This may be due to old style Illumina header lines" +
            "\nIf this is an error, the software will need updating to recognise different fastq header formatting conventions\n")
# returning an empty dataframe
return pd.DataFrame()
print(str(len(read_grps)) + ' readgroups found:')
print('\n'.join(read_grps))
print(str(len(read_comments)) + ' sample comments found:')
print(', '.join(read_comments))
if (len(read_grps) < len(read_comments)) & (len(read_comments) >= 3):
coord_counts_reps_df = coord_reps_df.drop('read_grp', 1).rename(columns={"read_comment": 'sample_info'},
inplace=False).groupby(["ref_name",
"coord",
'sample_info']).size().reset_index(
name=condition_label + '_') # adding miseq support
print(str(len(read_comments)) + " sample replicates/mutant pools established")
# print(coord_counts_reps_df.head())
elif (len(read_grps) >= 3) & (len(read_comments) < len(read_grps)):
coord_counts_reps_df = coord_reps_df.drop('read_comment', 1).rename(columns={"read_grp": 'sample_info'},
inplace=False).groupby(["ref_name",
"coord",
"sample_info"]).size().reset_index(
name=condition_label + '_') # adding miseq support
print(str(len(read_grps)) + " sample replicates/mutant pools established")
# print(coord_counts_reps_df.head())
# if max(len(read_grps), len(read_comments)) < 3:
else:
        print(
            "Warning: Unable to resolve >= 3 samples in fastq/sam/bam data, continuing without replicate insertion counts" +
            "\nN.B.: If this is an error, the software may need updating to recognise novel fastq naming conventions")
# returning an empty dataframe
return pd.DataFrame()
coord_df_pivot = coord_counts_reps_df.copy(deep=False).pivot_table(index=["ref_name", "coord"],
columns=['sample_info'],
values=[condition_label + '_'],
fill_value=0).reset_index()
coord_df_pivot.columns = [''.join(col).strip() for col in coord_df_pivot.columns.values]
sample_grps = sorted(coord_counts_reps_df.sample_info.unique())
old_rep_names = [condition_label + '_' + str(x) for x in sample_grps]
new_rep_names = [condition_label + '_' + "MP" + str(x) for x in range(1, len(sample_grps) + 1)]
coord_df_pivot.rename(columns=dict(zip(old_rep_names, new_rep_names)), inplace=True)
attr_to_columns_short = attr_to_columns[["seq_id", "start", "end"]]
sqlcode = '''
select coord_df_pivot.*
,attr_to_columns_short.*
from attr_to_columns_short
left join coord_df_pivot
on coord_df_pivot.coord between attr_to_columns_short.start and attr_to_columns_short.end
where coord_df_pivot.ref_name like '%' || attr_to_columns_short.seq_id || '%'
'''
    # weird sqlite concatenation: + >> ||, '%' == wildcard; double check the effect of this
# this line should allow multi contig files
mp_coords_join_gff = ps.sqldf(sqlcode, locals())
# remove first 2 columns ref_name, coord sums the rest according to the feature coordinate groups
mp_reps_feature_counts = mp_coords_join_gff.drop(mp_coords_join_gff.columns[[0, 1]], axis=1).groupby(
['seq_id', 'start', 'end']).agg(["sum"]).reset_index()
mp_reps_feature_counts.columns = mp_reps_feature_counts.columns.get_level_values(0)
return mp_reps_feature_counts
def coordinates_to_features(sam_stem, attr_to_columns, gff_columns_addback, condition_label, min_depth_cutoff,
gff_extra, db_rdir):
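    """Summarise insertion coordinates per GFF feature for one condition: write
    a GFF of insertion sites to db_rdir, join coordinates to features with
    pandasql and build a per-feature table of insertion/read counts, insert
    positions (as percentiles of feature length) and normalised NRM/NIM scores."""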
coord_df = pd.read_csv(sam_stem + "_insert_coords.txt", sep='\t', dtype={'ref_name': "str", 'coord': "int64"})
coord_counts_df = coord_df.groupby(['ref_name', 'coord']).size().reset_index(name='counts')
# print(coord_counts_df.head())
number_of_insertion_sites = len(coord_counts_df)
number_of_reads_mapped = coord_counts_df['counts'].sum()
min_reads_at_site = coord_counts_df['counts'].min()
max_reads_at_site = coord_counts_df['counts'].max()
median_reads_at_site = round(coord_counts_df['counts'].median(), 2)
mean_insertion_site_depth = round(number_of_reads_mapped / number_of_insertion_sites, 2)
coord_counts_df = coord_counts_df[coord_counts_df['counts'] >= min_depth_cutoff]
# format insertion site info as a GFF
coord_counts_df_pimms2_gff = coord_counts_df.reset_index()
coord_counts_df_pimms2_gff['source'] = 'pimms2'
coord_counts_df_pimms2_gff['feature_type'] = 'misc_feature'
coord_counts_df_pimms2_gff['strand'] = '.'
coord_counts_df_pimms2_gff['phase'] = '.'
coord_counts_df_pimms2_gff['stop'] = coord_counts_df_pimms2_gff['coord']
coord_counts_df_pimms2_gff = coord_counts_df_pimms2_gff.rename(columns={'counts': 'score', 'coord': 'start'})
coord_counts_df_pimms2_gff['info'] = 'note=insertion;'
coord_counts_df_pimms2_gff = coord_counts_df_pimms2_gff[
['ref_name', 'source', 'feature_type', 'start', 'stop', 'score', 'strand', 'phase', 'info']]
print(coord_counts_df_pimms2_gff.head())
coord_counts_df_pimms2_gff.to_csv(os.path.join(db_rdir, condition_label + "_pimms_insert_coordinates" + ".gff"), index=False, sep='\t',
header=False)
# added .loc to fix warning
# SettingWithCopyWarning:
# A value is trying to be set on a copy of a slice from a DataFrame.
# Try using .loc[row_indexer,col_indexer] = value instead
# coord_counts_df = \
coord_counts_df.loc[:, 'between_insertion_gap'] = coord_counts_df.groupby(['ref_name'])['coord'].diff()
# coord_counts_df = coord_counts_df.loc[:, 'between_insertion_gap'] = coord_counts_df['coord'].diff()
# coord_counts_gt1_df = coord_counts_gt1_df({'between_insertion_gap': 0})
min_between_insertion_gap = coord_counts_df['between_insertion_gap'].min()
max_between_insertion_gap = coord_counts_df['between_insertion_gap'].max()
median_between_insertion_gap = coord_counts_df['between_insertion_gap'].median()
mean_between_insertion_gap = round(coord_counts_df['between_insertion_gap'].mean(), 2)
# attr_to_columns.to_csv("attr_to_columns" + ".txt", index=False, sep='\t', header=True)
sqlcode = '''
select coord_counts_df.*
,attr_to_columns.*
from attr_to_columns
left join coord_counts_df
on coord_counts_df.coord between attr_to_columns.start and attr_to_columns.end
where coord_counts_df.ref_name like '%' || attr_to_columns.seq_id || '%'
'''
    # weird sqlite concatenation: + >> ||, '%' == wildcard; double check the effect of this
# this line should allow multi contig files
coords_join_gff = ps.sqldf(sqlcode, locals())
# debugging save of intermediate data
# coords_join_gff.to_csv("pimms_coords_join_gffstuff" + condition_label + ".txt", index=False, sep='\t', header=False)
# quick dataframe summary
# coords_join_gff.count
# add position as percentile (needs manual confirmation)
coords_join_gff = coords_join_gff.assign(
# python/pandas implementation of PIMMS.pl code to derive insert position as percentile of gene length
# sprintf("%.1f", ((($in-$in_start)+1) / ($in_length/100)));
# sprintf("%.1f", ((($in_stop-$ in) + 1) / ($in_length / 100)));
posn_as_percentile=(((coords_join_gff.coord - coords_join_gff.start) + 1) / (
coords_join_gff.feat_length / 100)).where(
coords_join_gff.strand == '+', ((coords_join_gff.end - coords_join_gff.coord) + 1) / (
coords_join_gff.feat_length / 100))).round({"posn_as_percentile": 1})
print(list(attr_to_columns.columns.values))
    # Important fix: fillna before grouping -- groupby silently drops any rows with NaN values in the grouping columns
coords_join_gff.fillna('', inplace=True)
pimms_result_table = coords_join_gff.groupby(
['seq_id', 'ID', # added ID field as unique identifier
'locus_tag', 'type', 'gene', 'start', 'end', 'feat_length', 'product'] + gff_extra).agg(
num_insertions_mapped_per_feat=('counts', 'sum'),
num_insert_sites_per_feat=('counts', 'count'),
first_insert_posn_as_percentile=('posn_as_percentile', 'min'),
last_insert_posn_as_percentile=('posn_as_percentile', 'max')
).reset_index()
# test diagnostic files
#pimms_result_table.to_csv("pimms_coords_join_prt1_" + condition_label + ".txt", index=False, sep='\t', header=True)
pimms_result_table = pimms_result_table.assign(num_insert_sites_per_feat_per_kb=(
(pimms_result_table.num_insert_sites_per_feat / pimms_result_table.feat_length) * 1000),
NRM_score=((pimms_result_table.num_insertions_mapped_per_feat / (
pimms_result_table.feat_length / 1000)) / (
number_of_reads_mapped / 1e6)),
NIM_score=((pimms_result_table.num_insert_sites_per_feat / (
pimms_result_table.feat_length / 1000)) / (
number_of_reads_mapped / 1e6))
).round({'num_insert_sites_per_feat_per_kb': 2, 'NRM_score': 2, 'NIM_score': 2})
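    # worked example (illustrative numbers only): for a 2 kb feature with 10 insertion reads at
    # 5 distinct sites in a library of 1,000,000 mapped reads:
    #   NRM_score = (10 / 2) / (1e6 / 1e6) = 5.0   reads per kb per million mapped reads
    #   NIM_score = (5 / 2) / (1e6 / 1e6) = 2.5    insertion sites per kb per million mapped reads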
print(list(pimms_result_table.columns.values))
# test diagnostic files
# pimms_result_table.to_csv("pimms_coords_join_prt2_" + condition_label + ".txt", index=False, sep='\t', header=False)
pimms_result_table = pimms_result_table[['seq_id',
'ID',
'locus_tag',
'type',
'gene',
'start',
'end',
'feat_length',
'product'] + gff_extra +
['num_insertions_mapped_per_feat',
'num_insert_sites_per_feat',
'num_insert_sites_per_feat_per_kb',
'first_insert_posn_as_percentile',
'last_insert_posn_as_percentile',
'NRM_score', # Normalised Reads Mapped
'NIM_score']] # Normalised Insertions Mapped
print(list(pimms_result_table.columns.values))
# pimms_result_table_full gff_columns_addback
navalues = {'num_insertions_mapped_per_feat': int(0),
'num_insert_sites_per_feat': int(0),
'num_insert_sites_per_feat_per_kb': int(0),
'first_insert_posn_as_percentile': int(0),
'last_insert_posn_as_percentile': int(0),
'NRM_score': int(0),
'NIM_score': int(0)}
pimms_result_table_full = pd.merge(gff_columns_addback, pimms_result_table, how='left').fillna(value=navalues)
# test diagnostic files
# gff_columns_addback.to_csv("pimms_coords_join_gff_columns_addback_" + condition_label + ".txt", index=False, sep='\t', header=False)
# pimms_result_table_full.to_csv("pimms_coords_join_prtf1_" + condition_label + ".txt", index=False, sep='\t', header=False)
# if set add prefix to columns
if condition_label:
label_cols = pimms_result_table_full.columns[
pimms_result_table_full.columns.isin(['num_insertions_mapped_per_feat',
'num_insert_sites_per_feat',
'num_insert_sites_per_feat_per_kb',
'first_insert_posn_as_percentile',
'last_insert_posn_as_percentile',
'NRM_score',
'NIM_score'])]
pimms_result_table_full.rename(columns=dict(zip(label_cols, condition_label + '_' + label_cols)),
inplace=True)
return pimms_result_table_full
# end of coordinates_to_features()
############################
# SAM_COORDS FUNCTIONS end
############################
def parse_arguments():
ap = configargparse.ArgumentParser( # description='PIMMS2 sam/bam processing',
prog="pimms2",
add_config_file_help=False,
config_file_parser_class=configargparse.DefaultConfigFileParser,
epilog="\n\n*** N.B. This is a development version ***\n \n ",
description='''description here'''
)
ap.add_argument('-v', '--version', action='version', version='%(prog)s 2.1 demo')
modes = ap.add_subparsers(parser_class=configargparse.ArgParser, dest='command')
findflank = modes.add_parser("find_flank", add_config_file_help=False,
help="Mode: find read regions flanking the IS sequence by mapping them to the target genome",
description="Args that start with '--' (eg. --fasta) can also be set in a config file (specified via -c)")
samcoords = modes.add_parser("bam_extract", add_config_file_help=False,
help="Mode: extract insertion site coordinates from sam file",
description="Args that start with '--' (eg. --fasta) can also be set in a config file (specified via -c)")
tablemerge = modes.add_parser("table_merge", add_config_file_help=False,
help='Mode: merge two compatible PIMMS results tables '
'(N.B: this step does a simple table join and does not check the data)').add_mutually_exclusive_group()
fullprocess = modes.add_parser("full_process", add_config_file_help=False,
help="Mode: find_flank + bam_extract",
description="Args that start with '--' (eg. --fasta) can also be set in a config file (specified via -c)")
# FIND_FLANK args
# to fix: nargs='?' deal with mistaken use of nargs=1 which give a single element list
findflank.add_argument("-c", "--config", required=False, is_config_file=True, # dest='config_file',
metavar='pimms2.config',
help="use parameters from config file")
findflank.add_argument("--nano", required=False, action='store_true', default=False,
help="override with settings more suitable for nanopore")
findflank.add_argument("--fasta", required=False, nargs=1, metavar='ref_genome.fasta', type=extant_file,
help="fasta file for reference genome ")
findflank.add_argument("--qual_char", required=False, nargs='?', type=str, default='0', choices=[chr(x + 33) for x in list(range(12, 31))],
help="substitute a quality score ascii character when fasta read files used (nanopore only) (phred +33: ascii +:?) ['0']")
findflank.add_argument("--nomap", required=False, action='store_true', default=False,
help="do not run mapping step")
findflank.add_argument("--mapper", required=False, nargs='?', type=str, default='bwa', choices=['minimap2', 'bwa'],
help="select mapping software from available options")
findflank.add_argument("--single", required=False, action='store_true', default=False,
help="only single direction Illumina data provided")
findflank.add_argument("--keep", required=False, action='store_true', default=False,
help="keep intermediate fastq files etc for diagnostic purposes")
findflank.add_argument("--lev", required=False, nargs=1, type=int, default=[0],
help="use Levenshtein distance (combined insert|del|sub score) [0]")
findflank.add_argument("--sub", required=False, nargs=1, type=int, default=[1],
help="number of permitted base substitutions in motif match [1]")
findflank.add_argument("--insert", required=False, nargs=1, type=int, default=[0],
help="number of permitted base insertions in motif match [0]")
findflank.add_argument("--del", required=False, nargs=1, type=int, default=[0], dest='deletion',
help="number of permitted base insertions in motif match [0]")
findflank.add_argument("--in_dir", required=True, nargs=1, dest='in_dir', type=extant_file,
help="directory containing input fastq files (assumed to match '*q.gz' or '*.fastq')")
findflank.add_argument("--fwdrev", required=False, nargs=1, type=str, default=['_R1_,_R2_'],
help="text substring to uniquely identify illumina fwd/rev paired fastq files ['_R1_,_R2_']")
findflank.add_argument("--out_dir", required=False, nargs=1, metavar='out_dir', default=[''],
action='store',
help="directory to contain result files ['pimms2_`label`_`dmy`_`HMS`']")
findflank.add_argument("--cpus", required=False, nargs=1, type=int, # default=[4],
default=[int(os.cpu_count() / 2)],
help="number of processors to use [(os.cpu_count() / 2)] ")
findflank.add_argument("--max", required=False, nargs=1, type=int, default=[60],
help="clip results to this length [60]")
findflank.add_argument("--min", required=False, nargs=1, type=int, default=[25],
help="minimum read length [25]")
findflank.add_argument("--motif1", required=False, nargs=1, type=str, default=['TCAGAAAACTTTGCAACAGAACC'],
# revcomp: GGTTCTGTTGCAAAGTTTTCTGA
help="IS end reference motif1 [TCAGAAAACTTTGCAACAGAACC](pGh9)")
findflank.add_argument("--motif2", required=False, nargs=1, type=str, default=['GGTTCTGTTGCAAAGTTTAAAAA'],
# revcomp: TTTTTAAACTTTGCAACAGAACC
help="IS end reference motif2 [GGTTCTGTTGCAAAGTTTAAAAA](pGh9)")
findflank.add_argument("--label", required=True, nargs=1, metavar='condition_name', default=[''],
help="identifying text tag to add to results file")
# SAM EXTRACT args
samcoords.add_argument("-c", "--config", required=False, is_config_file=True,
metavar='pimms2.config',
help="use parameters from config file")
samcoords.add_argument("--bam", required=True, nargs=1, metavar='pimms.bam/sam', type=extant_file,
help="bam/sam file of mapped IS flanking sequences ")
samcoords.add_argument("--nano", required=False, action='store_true', default=False,
help="override with settings more suitable for nanopore")
samcoords.add_argument("--label", required=True, nargs=1, metavar='condition_name', default=[''],
help="text tag to add to results file")
samcoords.add_argument("--mismatch", required=False, nargs=1, type=float, metavar='float', default=[None],
choices=[round(x * 0.01, 2) for x in range(0, 21)],
help="fraction of permitted mismatches in mapped read ( 0 <= mismatch < 0.2) [no filter]")
samcoords.add_argument("--min_depth", required=False, nargs=1, type=int, default=[2], metavar='int',
help="minimum read depth at insertion site >= int [2]")
samcoords.add_argument("--noreps", required=False, action='store_true', default=False,
help="do not separate illumina read groups as replicate insertion count columns")
samcoords.add_argument("--gff", required=True, nargs=1, type=extant_file, default='', metavar='genome.gff',
help="GFF3 formatted file to use\n(note fasta sequence present in the file must be deleted before use)")
samcoords.add_argument("--gff_extra", required=False, nargs=1, type=str, default='', metavar="'x,y,z'",
help="comma separated list of extra fields to include from the GFF3 annotation\ne.g. 'ID,translation,note' ")
samcoords.add_argument("--gff_force", required=False, action='store_true', default=False,
help="override GFF/BAM seq id discrepancies e.g. use when the gff has a plasmid not present in the reference sequence or vice-versa")
samcoords.add_argument("--out_fmt", required=False, nargs=1, type=str, default=['xlsx'],
choices=['xlsx', 'tsv', 'csv'],
help="set results table file format tab/comma separated or Excel (tsv|csv|xlsx) [xlsx]")
# TABLE_MERGE args
tablemerge.add_argument("--xlsx", required=False, nargs=2, type=extant_file,
help="2x .xlsx Excel files")
tablemerge.add_argument("--csv", required=False, nargs=2, type=extant_file,
help="2x .csv comma separated text/table files")
tablemerge.add_argument("--tsv", required=False, nargs=2, type=extant_file,
help='2x .tsv tab (\\t) separated text/table files')
# FULL_PROCESS ##########################
fullprocess.add_argument("-c", "--config", required=False, is_config_file=True,
metavar='pimms2_run.config',
help="use parameters from config file")
fullprocess.add_argument("--nano", required=False, action='store_true', default=False,
help="override with settings more suitable for nanopore")
fullprocess.add_argument("--qual_char", required=False, nargs='?', type=str, default='0', choices=[chr(x + 33) for x in list(range(12, 31))],
help="substitute a quality score ascii character when fasta read files used (nanopore only) (phred +33: ascii +:?) ['0']")
fullprocess.add_argument("--fasta", required=False, nargs=1, metavar='ref_genome.fasta', type=extant_file,
help="fasta file for reference genome ")
fullprocess.add_argument("--nomap", required=False, action='store_true', default=False,
help="do not run mapping step")
fullprocess.add_argument("--mapper", required=False, nargs='?', type=str, default='bwa', choices=['minimap2', 'bwa'],
help="select mapping software from available options")
fullprocess.add_argument("--single", required=False, action='store_true', default=False,
help="only single direction Illumina data provided")
fullprocess.add_argument("--keep", required=False, action='store_true', default=False,
help="keep intermediate files for diagnostic purposes")
fullprocess.add_argument("--lev", required=False, nargs=1, type=int, default=[0],
help="use Levenshtein distance (combined insert|del|sub score)")
fullprocess.add_argument("--sub", required=False, nargs=1, type=int, default=[1],
help="number of permitted base substitutions in motif match [1]")
fullprocess.add_argument("--insert", required=False, nargs=1, type=int, default=[0],
help="number of permitted base insertions in motif match [0]")
fullprocess.add_argument("--del", required=False, nargs=1, type=int, default=[0], dest='deletion',
help="number of permitted base insertions in motif match [0]")
fullprocess.add_argument("--in_dir", required=True, nargs=1, dest='in_dir', type=extant_file,
help="directory containing input fastq files (assumed to match '*q.gz' or '*.fastq')")
fullprocess.add_argument("--fwdrev", required=False, nargs=1, type=str, default=['_R1_,_R2_'],
help="text substring to uniquely identify illumina fwd/rev paired fastq files ['_R1_,_R2_']")
fullprocess.add_argument("--out_dir", required=False, nargs=1, metavar='out_dir', default=[''],
action='store',
help="directory to contain result files ['pimms2_`label`_`dmy`_`HMS`']")
fullprocess.add_argument("--cpus", required=False, nargs=1, type=int, # default=int(4),
default=[int(os.cpu_count() / 2)],
help="number of processors to use [(os.cpu_count() / 2)] ")
fullprocess.add_argument("--max", required=False, nargs=1, type=int, default=[60],
help="clip results to this length [60]")
fullprocess.add_argument("--min", required=False, nargs=1, type=int, default=[25],
help="minimum read length [25]")
fullprocess.add_argument("--motif1", required=False, nargs=1, type=str, default=['TCAGAAAACTTTGCAACAGAACC'],
# revcomp: GGTTCTGTTGCAAAGTTTTCTGA
help="IS end reference motif1 [TCAGAAAACTTTGCAACAGAACC](pGh9)")
fullprocess.add_argument("--motif2", required=False, nargs=1, type=str, default=['GGTTCTGTTGCAAAGTTTAAAAA'],
# revcomp: TTTTTAAACTTTGCAACAGAACC
help="IS end reference motif2 [GGTTCTGTTGCAAAGTTTAAAAA](pGh9)")
fullprocess.add_argument("--label", required=True, nargs=1, metavar='condition_name', default=[''],
help="identifying text tag to add to results file")
fullprocess.add_argument("--bam", required=False, nargs=1, metavar='pimms.bam/sam', # type=extant_file,
type=str, default=['bam?'],
help=configargparse.SUPPRESS)
# samcoords.add_argument("--nano", required=False, action='store_true', default=False,
# help="override with settings more suitable for nanopore")
# samcoords.add_argument("--label", required=False, nargs=1, metavar='condition_name', default=[''],
# help="text tag to add to results file")
fullprocess.add_argument("--mismatch", required=False, nargs=1, type=float, metavar='float', default=[None],
choices=[round(x * 0.01, 2) for x in range(0, 21)],
help="fraction of permitted mismatches in mapped read ( 0 <= mismatch < 0.2) [no filter]")
fullprocess.add_argument("--min_depth", required=False, nargs=1, type=int, default=[2], metavar='int',
help="minimum read depth at insertion site >= int [2]")
fullprocess.add_argument("--noreps", required=False, action='store_true', default=False,
help="do not separate illumina read groups as replicate insertion count columns")
fullprocess.add_argument("--gff", required=True, nargs=1, type=extant_file, default='', metavar='genome.gff',
help="GFF3 formatted file to use\n(note fasta sequence present in the file must be deleted before use)")
fullprocess.add_argument("--gff_extra", required=False, nargs=1, type=str, default='', metavar="'x,y,z'",
help="comma separated list of extra fields to include from the GFF3 annotation\ne.g. 'ID,translation,note' ")
fullprocess.add_argument("--gff_force", required=False, action='store_true', default=False,
help="override GFF/BAM seq id discrepancies "
"e.g. use when the gff has a plasmid not present in the reference sequence or vice-versa")
fullprocess.add_argument("--out_fmt", required=False, nargs=1, type=str, default=['xlsx'],
choices=['xlsx', 'tsv', 'csv'],
help="set results table file format tab/comma separated or Excel (tsv|csv|xlsx) [xlsx]")
local_parsed_args = ap.parse_known_args()
print("-----------------")
ap.print_values()
print("-----------------")
# print(ap.format_values())
print(local_parsed_args)
print("-----------------")
#
# exit and print short help message if no mode/arguments supplied
if len(sys.argv) <= 2:
ap.print_usage()
sys.exit(1)
if local_parsed_args[0].command == 'find_flank':
if not local_parsed_args[0].nomap:
prog_in_path_check(local_parsed_args[0].mapper)
# prog_in_path_check('bwa')
if local_parsed_args[0].fasta is None:
ap.error("unless the --nomap flag is used please supply a sequence file e.g: --fasta contigs.fasta")
elif not local_parsed_args[0].label:
ap.error("unless the --nomap flag is used please supply a text label string --label cond_01")
else:
print("reference seq file provided: " + local_parsed_args[0].fasta[0])
# print("##########")
# print(ap.format_values()) # useful for logging where different settings came from
# sys.exit(1)
# print("\n\n\n")
# print(parsed_args[0].command)
# print("----------======")
# print(ap.)
# sys.exit(1)
return local_parsed_args
# elif parsed_args[0].command == 'bam_extract':
def bam_extract_func(parsed_args_be):
print(pimms_mssg + parsed_args_be[0].command + pimms_mssg2)
if parsed_args_be[0].nano:
parsed_args_be[0].noreps = True
# sort out extra requested gff annotation fields
if parsed_args_be[0].gff_extra:
# strip any formatting quotes and turn comma separated string into a list of fields
gff_extra = parsed_args_be[0].gff_extra[0].strip("'\"").split(',')
else:
gff_extra = []
# process the gff file to get required fields
print("extra gff fields: " + str(gff_extra))
gff_file = parsed_args_be[0].gff[0]
gff_feat_type = ['CDS', 'tRNA', 'rRNA']
min_depth_cutoff = parsed_args_be[0].min_depth[0]
fraction_mismatch = parsed_args_be[0].mismatch[0]
sam_file = parsed_args_be[0].bam[0]
condition_label = parsed_args_be[0].label[0]
print("\ncond label " + condition_label + "\n")
# process pimms sam/bam file and produce coordinate / bed files
sam_dir, db_rdir, info_rdir = make_results_dirs_in_sam_dir(sam_file, condition_label)
# process the gff file to get required fields
gff_columns_addback, attr_to_columns = process_gff(gff_file, gff_feat_type, gff_extra, info_rdir)
gff_columns_addback_pseudo, attr_to_columns_pseudo = process_gff(gff_file, ['pseudogene'], [], info_rdir)
seqid_consistancy_check(gff_columns_addback, sam_file)
process_sam(sam_file, min_depth_cutoff, fraction_mismatch)
sam_stem = modify_sam_stem(sam_file, min_depth_cutoff, fraction_mismatch)
# allocate insertions to features and create results merged with GFF
# possibly poor coding to merge with gff here
pimms_result_table_full = coordinates_to_features(sam_stem, attr_to_columns, gff_columns_addback, condition_label,
min_depth_cutoff, gff_extra, db_rdir)
# if parsed_args[0].nano:
# print("--noreps forced for nanopore data\n")
if not parsed_args_be[0].noreps:
print("processing read groups as replicates for illumina data\n")
mp_reps_feature_counts = coordinates_to_features_reps(sam_stem, attr_to_columns, condition_label)
if not mp_reps_feature_counts.empty:
merged_with_reps = pimms_result_table_full.merge(mp_reps_feature_counts, on=["seq_id", "start", "end"],
how='outer')
# how='inner')
pimms_result_table_full = merged_with_reps.fillna(0)
else:
print("not processing read groups as replicates\n")
if not gff_columns_addback_pseudo.empty:
        tag_pseudogenes = gff_columns_addback_pseudo['locus_tag']
        pimms_result_table_full.loc[pimms_result_table_full.locus_tag.isin(tag_pseudogenes), "type"] = \
pimms_result_table_full['type'] + '_pseudo'
# print(parsed_args_be[0].out_fmt[0] + "out_fmt\n")
# write results as text/excel
if parsed_args_be[0].out_fmt[0] == 'tsv':
pimms_result_table_full.to_csv(sam_stem + "_countinfo.tsv", index=False, sep='\t')
elif parsed_args_be[0].out_fmt[0] == 'csv':
pimms_result_table_full.to_csv(sam_stem + "_countinfo.csv", index=False, sep=',')
else:
writer = pd.ExcelWriter(sam_stem + '_countinfo.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
pimms_result_table_full.to_excel(writer, sheet_name='PIMMS2_result', index=False)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
os.rename(sam_stem + "_countinfo." + parsed_args_be[0].out_fmt[0], os.path.join(db_rdir, sam_stem + "_countinfo." + parsed_args_be[0].out_fmt[0]))
os.rename(sam_stem + "_insert_coords.txt", os.path.join(info_rdir, sam_stem + "_insert_coords.txt"))
os.rename(sam_stem + ".bed", os.path.join(info_rdir, sam_stem + ".bed"))
# end bam_extract_func
def table_merge_func(parsed_args_tm):
print(pimms_mssg + parsed_args_tm[0].command + pimms_mssg2)
if parsed_args_tm[0].xlsx:
print("Join: ", parsed_args_tm[0].xlsx[0], "\t", parsed_args_tm[0].xlsx[1], "\n")
# requires dependancy installed
result_df1 = pd.read_excel(parsed_args_tm[0].xlsx[0], engine="openpyxl")
result_df2 = pd.read_excel(parsed_args_tm[0].xlsx[1], engine="openpyxl")
results_merged = pd.DataFrame.merge(result_df1, result_df2)
writer = | pd.ExcelWriter('merged_result.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
import sys
import pandas as pd
import numpy as np
import catboost
DUR_RU = 'Длительность разговора с оператором, сек'
DUR_EN = 'oper_duration'
RU_COLS = [
'Время начала вызова', 'Время окончания вызова', 'Время постановки в очередь',
'Время переключения на оператора', 'Время окончания разговора с оператором',
]
EN_COLS = ['call_start_time', 'call_end_time', 'queue_time', 'oper_start_time', 'oper_end_time']
zero_time = pd.to_datetime('00:00:00')
SEC_PER_DAY = 60*60*24
NA_VALUE = -1.2345
def extract_features(data):
times = data[RU_COLS].apply(pd.to_datetime)
times.columns = EN_COLS
abs_times = times.apply(lambda x: (x - zero_time).dt.total_seconds()).fillna(NA_VALUE)
abs_times.columns = [c + '_abs' for c in EN_COLS]
    day = abs_times / SEC_PER_DAY * 2 * np.pi     # position within the day
    hour = (abs_times % 3600) / 3600 * 2 * np.pi  # position within the hour (abs_times is in seconds; the original modulus of 24 * 60 mixed minutes and seconds)
    minute = (abs_times % 60) / 60 * 2 * np.pi    # position within the minute
day_sines = np.sin(day)
day_cosines = np.cos(day)
hour_sines = np.sin(hour)
hour_cosines = np.cos(hour)
minute_sines = np.sin(minute)
minute_cosines = np.cos(minute)
day_sines.columns = ['day_sin__' + c for c in EN_COLS]
day_cosines.columns = ['day_cos__' + c for c in EN_COLS]
hour_sines.columns = ['hour_sin__' + c for c in EN_COLS]
hour_cosines.columns = ['hour_cos__' + c for c in EN_COLS]
minute_sines.columns = ['minute_sin__' + c for c in EN_COLS]
minute_cosines.columns = ['minute_cos__' + c for c in EN_COLS]
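    # the sin/cos pairs give a cyclical encoding of each timestamp, so values that wrap around
    # the period stay close together (e.g. 23:59 and 00:01 map to nearby points on the unit
    # circle instead of being far apart numerically)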
null_times = times.isnull().astype(int)
null_times.columns = [c + "_miss" for c in EN_COLS]
diffs = pd.DataFrame(index=times.index)
for i, c1 in enumerate(EN_COLS):
for j, c2 in enumerate(EN_COLS[(i + 1):]):
diffs['delta_{}_{}'.format(c1, c2)] = (times[c2] - times[c1]).dt.total_seconds().fillna(NA_VALUE)
deltas_base = 'delta_call_start_time_call_end_time'
for c in diffs.columns:
d = diffs[c] / diffs[deltas_base]
diffs['rel_{}'.format(c)] = d
x = pd.concat(
[abs_times, day_sines, day_cosines, hour_sines, hour_cosines, minute_sines, minute_cosines, null_times, diffs],
axis=1)
x[DUR_EN] = data[DUR_RU].fillna(NA_VALUE)
x[DUR_EN + '_miss'] = data[DUR_RU].isnull().astype(int)
devia = x['delta_oper_start_time_oper_end_time'] - x[DUR_EN]
devia[x['oper_duration_miss'] == 1] = 0
devia[x['delta_oper_start_time_oper_end_time'] < 0] = 0
x['oper_time_deviation'] = devia
return x
if __name__ == '__main__':
input_csv = sys.argv[1]
output_csv = sys.argv[2]
df_test = | pd.read_csv(input_csv, index_col='id') | pandas.read_csv |
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pkgutil
import sys
from tabulate import tabulate
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO, BytesIO
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# print("parent_dir")
# print(parent_dir)
# sys.path.append(parent_dir)
from ..trex_exe import Trex, TrexOutputs
print("sys.path")
print(sys.path)
# load transposed qaqc data for inputs and expected outputs
# this works for both local nosetests and travis deploy
# input details
try:
if __package__ is not None:
csv_data = pkgutil.get_data(__package__, 'trex_qaqc_in_transpose.csv')
data_inputs = BytesIO(csv_data)
pd_obj_inputs = pd.read_csv(data_inputs, index_col=0, engine='python')
else:
#csv_transpose_path_in = "./trex_qaqc_in_transpose.csv"
csv_transpose_path_in = os.path.join(os.path.dirname(__file__),"trex_qaqc_in_transpose.csv")
# print(csv_transpose_path_in)
pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python')
pd_obj_inputs['csrfmiddlewaretoken'] = '<PASSWORD>'
# with open('./trex_qaqc_in_transpose.csv') as f:
# csv_data = csv.reader(f)
finally:
pass
# print("trex inputs")
# print(pd_obj_inputs.shape)
# print('trex expected output keys ' + str(pd_obj_inputs.columns.values.tolist()))
# print(tabulate(pd_obj_inputs.iloc[:,0:5], headers='keys', tablefmt='plain'))
# print(tabulate(pd_obj_inputs.iloc[:,6:10], headers='keys', tablefmt='plain'))
# print(tabulate(pd_obj_inputs.iloc[:,11:13], headers='keys', tablefmt='plain'))
# print(tabulate(pd_obj_inputs.iloc[:,14:17], headers='keys', tablefmt='plain'))
# load transposed qaqc data for expected outputs
try:
if __package__ is not None:
data_exp_outputs = BytesIO(pkgutil.get_data(__package__, 'trex_qaqc_exp_transpose.csv'))
        pd_obj_exp = pd.read_csv(data_exp_outputs, index_col=0, engine='python')
else:
#csv_transpose_path_exp = "./trex_qaqc_exp_transpose.csv"
csv_transpose_path_exp = os.path.join(os.path.dirname(__file__),"trex_qaqc_exp_transpose.csv")
# print(csv_transpose_path_exp)
pd_obj_exp = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
finally:
pass
# print("trex expected outputs")
# print('trex expected output dimensions ' + str(pd_obj_exp.shape))
# print('trex expected output keys ' + str(pd_obj_exp.columns.values.tolist()))
# print(tabulate(pd_obj_exp.iloc[:,0:5], headers='keys', tablefmt='plain'))
# print(tabulate(pd_obj_exp.iloc[:,6:10], headers='keys', tablefmt='plain'))
# print(tabulate(pd_obj_exp.iloc[:,11:14], headers='keys', tablefmt='plain'))
# print(tabulate(pd_obj_exp.iloc[:,15:16], headers='keys', tablefmt='plain'))
# create an instance of trex object with qaqc data
trex_output_empty = TrexOutputs()
trex_calc = Trex(pd_obj_inputs, pd_obj_exp)
trex_calc.execute_model()
inputs_json, outputs_json, exp_out_json = trex_calc.get_dict_rep()
# print("trex output")
# print(inputs_json)
# print("####")
# print(trex_calc)
test = {}
# trex_calc.execute_model()
class TestTrex(unittest.TestCase):
"""
Integration tests for trex.
"""
def setUp(self):
"""
Setup routine for trex.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for trex.
:return:
"""
pass
def test_assert_output_series(self):
""" Verify that each output variable is a pd.Series """
try:
num_variables = len(trex_calc.pd_obj_out.columns)
result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
for i in range(num_variables):
column_name = trex_calc.pd_obj_out.columns[i]
output = getattr(trex_calc, column_name)
if isinstance(output, pd.Series):
result[i] = True
tab = | pd.concat([result,expected], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 16 09:50:42 2019
@author: michaelek
"""
import os
import numpy as np
import pandas as pd
import yaml
from allotools.data_io import get_permit_data, get_usage_data, allo_filter
from allotools.allocation_ts import allo_ts
from allotools.utils import grp_ts_agg
# from allotools.plot import plot_group as pg
# from allotools.plot import plot_stacked as ps
from datetime import datetime
# from scipy.special import erfc
# from matplotlib.pyplot import show
#########################################
### parameters
base_path = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_path, 'parameters.yml')) as param:
param = yaml.safe_load(param)
pk = ['permit_id', 'wap', 'date']
dataset_types = ['allo', 'metered_allo', 'usage', 'usage_est']
allo_type_dict = {'D': 'max_daily_volume', 'W': 'max_daily_volume', 'M': 'max_annual_volume', 'A-JUN': 'max_annual_volume', 'A': 'max_annual_volume'}
# allo_mult_dict = {'D': 0.001*24*60*60, 'W': 0.001*24*60*60*7, 'M': 0.001*24*60*60*30, 'A-JUN': 0.001*24*60*60*365, 'A': 0.001*24*60*60*365}
temp_datasets = ['allo_ts', 'total_allo_ts', 'wap_allo_ts', 'usage_ts', 'metered_allo_ts']
#######################################
### Testing
# from_date = '2000-07-01'
# to_date = '2020-06-30'
#
# self = AlloUsage(from_date=from_date, to_date=to_date)
#
# results1 = self.get_ts(['allo', 'metered_allo', 'usage'], 'M', ['permit_id', 'wap'])
# results2 = self.get_ts(['usage'], 'D', ['wap'])
# results3 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'M', ['permit_id', 'wap'])
# results3 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'D', ['permit_id', 'wap'])
# wap_filter = {'wap': ['C44/0001']}
#
# self = AlloUsage(from_date=from_date, to_date=to_date, wap_filter=wap_filter)
#
# results1 = self.get_ts(['allo', 'metered_allo', 'usage'], 'M', ['permit_id', 'wap'])
# results2 = self.get_ts(['usage'], 'D', ['wap'])
# permit_filter = {'permit_id': ['200040']}
#
# self = AlloUsage(from_date=from_date, to_date=to_date, permit_filter=permit_filter)
#
# results1 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'M', ['permit_id', 'wap'])
# results2 = self.get_ts(['allo', 'metered_allo', 'usage', 'usage_est'], 'D', ['permit_id', 'wap'])
########################################
### Core class
class AlloUsage(object):
"""
    Class to process the allocation and usage data in NZ.
Parameters
----------
from_date : str or None
The start date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
to_date : str or None
The end date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
permit_filter : dict
        If permit_filter is a list, then it should represent the columns from the permit table that should be returned. If it's a dict, then the keys should be the column names and the values should be the filter on those columns.
wap_filter : dict
If wap_filter is a list, then it should represent the columns from the wap table that should be returned. If it's a dict, then the keys should be the column names and the values should be the filter on those columns.
only_consumptive : bool
Should only the consumptive takes be returned? Default True
include_hydroelectric : bool
Should hydro-electric takes be included? Default False
Returns
-------
AlloUsage object
with all of the base sites, allo, and allo_wap DataFrames
"""
dataset_types = dataset_types
# plot_group = pg
# plot_stacked = ps
_usage_remote = param['remote']['usage']
_permit_remote = param['remote']['permit']
### Initial import and assignment function
def __init__(self, from_date=None, to_date=None, permit_filter=None, wap_filter=None, only_consumptive=True, include_hydroelectric=False):
"""
Parameters
----------
from_date : str or None
The start date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
to_date : str or None
The end date of the consent and the final time series. In the form of '2000-01-01'. None will return all consents and subsequently all dates.
permit_filter : dict
            If permit_filter is a list, then it should represent the columns from the permit table that should be returned. If it's a dict, then the keys should be the column names and the values should be the filter on those columns.
wap_filter : dict
If wap_filter is a list, then it should represent the columns from the wap table that should be returned. If it's a dict, then the keys should be the column names and the values should be the filter on those columns.
only_consumptive : bool
Should only the consumptive takes be returned? Default True
include_hydroelectric : bool
Should hydro-electric takes be included? Default False
Returns
-------
AlloUsage object
with all of the base sites, allo, and allo_wap DataFrames
"""
permits0 = get_permit_data(self._permit_remote['connection_config'], self._permit_remote['bucket'], self._permit_remote['permits_key'])
waps, permits = allo_filter(permits0, from_date, to_date, permit_filter=permit_filter, wap_filter=wap_filter, only_consumptive=only_consumptive, include_hydroelectric=include_hydroelectric)
if from_date is None:
from_date1 = pd.Timestamp('1900-07-01')
else:
from_date1 = pd.Timestamp(from_date)
if to_date is None:
to_date1 = pd.Timestamp.now().floor('D')
else:
to_date1 = pd.Timestamp(to_date)
setattr(self, 'waps', waps)
setattr(self, 'permits', permits)
# setattr(self, 'sd', sd)
setattr(self, 'from_date', from_date1)
setattr(self, 'to_date', to_date1)
def _est_allo_ts(self):
"""
"""
### Run the allocation time series creation
        ### This has currently been hard-coded to only use the max rate. This should probably be changed once the permitting data gets fixed.
limit_col = allo_type_dict[self.freq]
# multiplier = allo_mult_dict[self.freq]
# limit_col = 'max_rate'
allo4 = allo_ts(self.permits, self.from_date, self.to_date, self.freq, limit_col).round()
allo4.name = 'total_allo'
# allo4 = (allo4 * multiplier).round()
# if self.irr_season and ('A' not in self.freq):
# dates1 = allo4.index.levels[2]
# dates2 = dates1[dates1.month.isin([10, 11, 12, 1, 2, 3, 4])]
# allo4 = allo4.loc[(slice(None), slice(None), dates2)]
setattr(self, 'total_allo_ts', allo4.reset_index())
def _allo_wap_spit(self):
"""
"""
allo6 = pd.merge(self.total_allo_ts, self.waps[['permit_id', 'wap', 'sd_ratio']], on=['permit_id'])
# allo6 = pd.merge(allo5, self.sd, on=['permit_id', 'wap'], how='left')
allo6['combo_wap_allo'] = allo6.groupby(['permit_id', 'hydro_feature', 'date'])['total_allo'].transform('sum')
allo6['combo_wap_ratio'] = allo6['total_allo']/allo6['combo_wap_allo']
allo6['wap_allo'] = allo6['total_allo'] * allo6['combo_wap_ratio']
allo7 = allo6.drop(['combo_wap_allo', 'combo_wap_ratio', 'total_allo'], axis=1).rename(columns={'wap_allo': 'total_allo'}).copy()
## Calculate the stream depletion
allo7.loc[allo7.sd_ratio.isnull() & (allo7.hydro_feature == 'groundwater'), 'sd_ratio'] = 0
allo7.loc[allo7.sd_ratio.isnull() & (allo7.hydro_feature == 'surface water'), 'sd_ratio'] = 1
allo7['sw_allo'] = allo7['total_allo'] * allo7['sd_ratio']
allo7['gw_allo'] = allo7['total_allo'] - allo7['sw_allo']
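        # illustrative numbers only: a groundwater take with total_allo 1000 m3 and sd_ratio 0.4
        # splits into sw_allo 400 m3 (stream-depleting share) and gw_allo 600 m3; surface water
        # takes default to sd_ratio 1, so their entire allocation ends up in sw_allo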
allo8 = allo7.drop(['hydro_feature', 'sd_ratio'], axis=1).groupby(pk).mean()
setattr(self, 'wap_allo_ts', allo8)
def _get_allo_ts(self):
"""
Function to create an allocation time series.
"""
if not hasattr(self, 'total_allo_ts'):
self._est_allo_ts()
### Convert to GW and SW allocation
self._allo_wap_spit()
def _process_usage(self):
"""
"""
if not hasattr(self, 'wap_allo_ts'):
self._get_allo_ts()
allo1 = self.wap_allo_ts.copy().reset_index()
waps = allo1.wap.unique().tolist()
## Get the ts data and aggregate
if hasattr(self, 'usage_ts_daily'):
tsdata1 = self.usage_ts_daily
else:
tsdata1, stns_waps = get_usage_data(self._usage_remote['connection_config'], self._usage_remote['bucket'], waps, self.from_date, self.to_date)
tsdata1.rename(columns={'water_use': 'total_usage', 'time': 'date'}, inplace=True)
tsdata1 = tsdata1[['wap', 'date', 'total_usage']].copy()
## filter - remove individual spikes and negative values
tsdata1.loc[tsdata1['total_usage'] < 0, 'total_usage'] = 0
def remove_spikes(x):
val1 = bool(x[1] > (x[0] + x[2] + 2))
if val1:
return (x[0] + x[2])/2
else:
return x[1]
tsdata1.iloc[1:-1, 2] = tsdata1['total_usage'].rolling(3, center=True).apply(remove_spikes, raw=True).iloc[1:-1]
setattr(self, 'usage_ts_daily', tsdata1)
## Convert station data to DataFrame
stns_waps1 = | pd.DataFrame([{'wap': s['ref'], 'lon': s['geometry']['coordinates'][0], 'lat': s['geometry']['coordinates'][1]} for s in stns_waps]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
"""
The tqsdk.ta module contains a collection of commonly used technical indicator calculation functions
"""
import numpy as np
import pandas as pd
import numba
from tqsdk import ta_func
def ATR(df, n):
"""平均真实波幅"""
new_df = pd.DataFrame()
pre_close = df["close"].shift(1)
new_df["tr"] = np.where(df["high"] - df["low"] > np.absolute(pre_close - df["high"]),
np.where(df["high"] - df["low"] > np.absolute(pre_close - df["low"]),
df["high"] - df["low"], np.absolute(pre_close - df["low"])),
np.where(np.absolute(pre_close - df["high"]) > np.absolute(pre_close - df["low"]),
np.absolute(pre_close - df["high"]), np.absolute(pre_close - df["low"])))
new_df["atr"] = ta_func.ma(new_df["tr"], n)
return new_df
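# Hedged usage sketch -- every helper in this module expects a K-line DataFrame with
# "open"/"high"/"low"/"close"/"volume" columns; the TqApi lines below are an assumption
# about how such a DataFrame is usually obtained and are not part of this module:
# from tqsdk import TqApi
# api = TqApi()
# klines = api.get_kline_serial("SHFE.cu2001", 24 * 60 * 60)
# print(ATR(klines, 14)["atr"].iloc[-1])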
def BIAS(df, n):
"""乖离率"""
ma1 = ta_func.ma(df["close"], n)
new_df = pd.DataFrame(data=list((df["close"] - ma1) / ma1 * 100), columns=["bias"])
return new_df
def BOLL(df, n, p):
"""布林线"""
new_df = pd.DataFrame()
mid = ta_func.ma(df["close"], n)
std = df["close"].rolling(n).std()
new_df["mid"] = mid
new_df["top"] = mid + p * std
new_df["bottom"] = mid - p * std
return new_df
def DMI(df, n, m):
"""动向指标"""
new_df = pd.DataFrame()
new_df["atr"] = ATR(df, n)["atr"]
pre_high = df["high"].shift(1)
pre_low = df["low"].shift(1)
hd = df["high"] - pre_high
ld = pre_low - df["low"]
admp = ta_func.ma(pd.Series(np.where((hd > 0) & (hd > ld), hd, 0)), n)
admm = ta_func.ma(pd.Series(np.where((ld > 0) & (ld > hd), ld, 0)), n)
new_df["pdi"] = pd.Series(np.where(new_df["atr"] > 0, admp / new_df["atr"] * 100, np.NaN)).ffill()
new_df["mdi"] = pd.Series(np.where(new_df["atr"] > 0, admm / new_df["atr"] * 100, np.NaN)).ffill()
ad = pd.Series(np.absolute(new_df["mdi"] - new_df["pdi"]) / (new_df["mdi"] + new_df["pdi"]) * 100)
new_df["adx"] = ta_func.ma(ad, m)
new_df["adxr"] = (new_df["adx"] + new_df["adx"].shift(m)) / 2
return new_df
def KDJ(df, n, m1, m2):
"""随机指标"""
new_df = pd.DataFrame()
hv = df["high"].rolling(n).max()
lv = df["low"].rolling(n).min()
rsv = pd.Series(np.where(hv == lv, 0, (df["close"] - lv) / (hv - lv) * 100))
new_df["k"] = ta_func.sma(rsv, m1, 1)
new_df["d"] = ta_func.sma(new_df["k"], m2, 1)
new_df["j"] = 3 * new_df["k"] - 2 * new_df["d"]
return new_df
def MACD(df, short, long, m):
"""异同移动平均线"""
new_df = pd.DataFrame()
eshort = ta_func.ema(df["close"], short)
elong = ta_func.ema(df["close"], long)
new_df["diff"] = eshort - elong
new_df["dea"] = ta_func.ema(new_df["diff"], m)
new_df["bar"] = 2 * (new_df["diff"] - new_df["dea"])
return new_df
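# Hedged usage sketch (same assumed klines DataFrame as in the ATR note above):
# macd = MACD(klines, 12, 26, 9)
# klines["diff"], klines["dea"], klines["bar"] = macd["diff"], macd["dea"], macd["bar"]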
@numba.njit
def _sar(open, high, low, close, range_high, range_low, n, step, maximum):
sar = np.empty_like(close)
sar[:n] = np.NAN
af = 0
ep = 0
trend = 1 if (close[n] - open[n]) > 0 else -1
if trend == 1:
sar[n] = min(range_low[n - 2], low[n - 1])
else:
sar[n] = max(range_high[n - 2], high[n - 1])
for i in range(n, len(sar)):
if i != n:
if abs(trend) > 1:
sar[i] = sar[i - 1] + af * (ep - sar[i - 1])
elif trend == 1:
sar[i] = min(range_low[i - 2], low[i - 1])
elif trend == -1:
sar[i] = max(range_high[i - 2], high[i - 1])
if trend > 0:
if sar[i - 1] > low[i]:
ep = low[i]
af = step
trend = -1
else:
ep = high[i]
af = min(af + step, maximum) if ep > range_high[i - 1] else af
trend += 1
else:
if sar[i - 1] < high[i]:
ep = high[i]
af = step
trend = 1
else:
ep = low[i]
af = min(af + step, maximum) if ep < range_low[i - 1] else af
trend -= 1
return sar
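# _sar keeps three pieces of state per bar: the extreme point `ep`, the acceleration factor
# `af` (reset to `step` on every reversal and capped at `maximum`), and a signed `trend`
# counter whose sign gives the current direction and whose magnitude counts bars since the
# last reversal.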
def SAR(df, n, step, max):
"""抛物转向"""
range_high = df["high"].rolling(n - 1).max()
range_low = df["low"].rolling(n - 1).min()
sar = _sar(df["open"].values, df["high"].values, df["low"].values, df["close"].values, range_high.values,
range_low.values, n, step, max)
new_df = pd.DataFrame(data=sar, columns=["sar"])
return new_df
def WR(df, n):
"""威廉指标"""
hn = df["high"].rolling(n).max()
ln = df["low"].rolling(n).min()
new_df = pd.DataFrame(data=list((hn - df["close"]) / (hn - ln) * (-100)), columns=["wr"])
return new_df
def RSI(df, n):
"""相对强弱指标"""
lc = df["close"].shift(1)
rsi = ta_func.sma(pd.Series(np.where(df["close"] - lc > 0, df["close"] - lc, 0)), n, 1) / \
ta_func.sma(np.absolute(df["close"] - lc), n, 1) * 100
new_df = pd.DataFrame(data=rsi, columns=["rsi"])
return new_df
def ASI(df):
"""振动升降指标"""
lc = df["close"].shift(1) # 上一交易日的收盘价
aa = np.absolute(df["high"] - lc)
bb = np.absolute(df["low"] - lc)
cc = np.absolute(df["high"] - df["low"].shift(1))
dd = np.absolute(lc - df["open"].shift(1))
r = np.where((aa > bb) & (aa > cc), aa + bb / 2 + dd / 4,
np.where((bb > cc) & (bb > aa), bb + aa / 2 + dd / 4, cc + dd / 4))
x = df["close"] - lc + (df["close"] - df["open"]) / 2 + lc - df["open"].shift(1)
si = np.where(r == 0, 0, 16 * x / r * np.where(aa > bb, aa, bb))
new_df = pd.DataFrame(data=list(pd.Series(si).cumsum()), columns=["asi"])
return new_df
def VR(df, n):
"""VR 容量比率"""
lc = df["close"].shift(1)
vr = pd.Series(np.where(df["close"] > lc, df["volume"], 0)).rolling(n).sum() / pd.Series(
np.where(df["close"] <= lc, df["volume"], 0)).rolling(n).sum() * 100
new_df = pd.DataFrame(data=list(vr), columns=["vr"])
return new_df
def ARBR(df, n):
"""人气意愿指标"""
new_df = pd.DataFrame()
new_df["ar"] = (df["high"] - df["open"]).rolling(n).sum() / (df["open"] - df["low"]).rolling(n).sum() * 100
new_df["br"] = pd.Series(
np.where(df["high"] - df["close"].shift(1) > 0, df["high"] - df["close"].shift(1), 0)).rolling(
n).sum() / pd.Series(
np.where(df["close"].shift(1) - df["low"] > 0, df["close"].shift(1) - df["low"], 0)).rolling(n).sum() * 100
return new_df
def DMA(df, short, long, m):
"""平均线差"""
new_df = pd.DataFrame()
new_df["ddd"] = ta_func.ma(df["close"], short) - ta_func.ma(df["close"], long)
new_df["ama"] = ta_func.ma(new_df["ddd"], m)
return new_df
def EXPMA(df, p1, p2):
"""指数加权移动平均线组合"""
new_df = pd.DataFrame()
new_df["ma1"] = ta_func.ema(df["close"], p1)
new_df["ma2"] = ta_func.ema(df["close"], p2)
return new_df
def CR(df, n, m):
"""CR能量"""
new_df = pd.DataFrame()
mid = (df["high"] + df["low"] + df["close"]) / 3
new_df["cr"] = pd.Series(np.where(0 > df["high"] - mid.shift(1), 0, df["high"] - mid.shift(1))).rolling(
n).sum() / pd.Series(np.where(0 > mid.shift(1) - df["low"], 0, mid.shift(1) - df["low"])).rolling(n).sum() * 100
new_df["crma"] = ta_func.ma(new_df["cr"], m).shift(int(m / 2.5 + 1))
return new_df
def CCI(df, n):
"""顺势指标"""
typ = (df["high"] + df["low"] + df["close"]) / 3
ma = ta_func.ma(typ, n)
def mad(x):
return np.fabs(x - x.mean()).mean()
    md = typ.rolling(window=n).apply(mad, raw=True)  # mean absolute deviation
new_df = pd.DataFrame(data=list((typ - ma) / (md * 0.015)), columns=["cci"])
return new_df
def OBV(df):
"""能量潮"""
lc = df["close"].shift(1)
obv = (np.where(df["close"] > lc, df["volume"], np.where(df["close"] < lc, -df["volume"], 0))).cumsum()
new_df = pd.DataFrame(data=obv, columns=["obv"])
return new_df
def CDP(df, n):
"""逆势操作"""
new_df = pd.DataFrame()
pt = df["high"].shift(1) - df["low"].shift(1)
cdp = (df["high"].shift(1) + df["low"].shift(1) + df["close"].shift(1)) / 3
new_df["ah"] = ta_func.ma(cdp + pt, n)
new_df["al"] = ta_func.ma(cdp - pt, n)
new_df["nh"] = ta_func.ma(2 * cdp - df["low"], n)
new_df["nl"] = ta_func.ma(2 * cdp - df["high"], n)
return new_df
def HCL(df, n):
"""均线通道"""
new_df = pd.DataFrame()
new_df["mah"] = ta_func.ma(df["high"], n)
new_df["mal"] = ta_func.ma(df["low"], n)
new_df["mac"] = ta_func.ma(df["close"], n)
return new_df
def ENV(df, n, k):
"""包略线 (Envelopes)"""
new_df = pd.DataFrame()
new_df["upper"] = ta_func.ma(df["close"], n) * (1 + k / 100)
new_df["lower"] = ta_func.ma(df["close"], n) * (1 - k / 100)
return new_df
def MIKE(df, n):
"""麦克指标"""
new_df = pd.DataFrame()
typ = (df["high"] + df["low"] + df["close"]) / 3
ll = df["low"].rolling(n).min()
hh = df["high"].rolling(n).max()
new_df["wr"] = typ + (typ - ll)
new_df["mr"] = typ + (hh - ll)
new_df["sr"] = 2 * hh - ll
new_df["ws"] = typ - (hh - typ)
new_df["ms"] = typ - (hh - ll)
new_df["ss"] = 2 * ll - hh
return new_df
def PUBU(df, m):
"""瀑布线"""
pb = (ta_func.ema(df["close"], m) + ta_func.ma(df["close"], m * 2) + ta_func.ma(df["close"], m * 4)) / 3
new_df = pd.DataFrame(data=list(pb), columns=["pb"])
return new_df
def BBI(df, n1, n2, n3, n4):
"""多空指数"""
bbi = (ta_func.ma(df["close"], n1) + ta_func.ma(df["close"], n2) + ta_func.ma(df["close"], n3) + ta_func.ma(
df["close"], n4)) / 4
new_df = pd.DataFrame(data=list(bbi), columns=["bbi"])
return new_df
def DKX(df, m):
"""多空线"""
new_df = pd.DataFrame()
a = (3 * df["close"] + df["high"] + df["low"] + df["open"]) / 6
new_df["b"] = (20 * a + 19 * a.shift(1) + 18 * a.shift(2) + 17 * a.shift(3) + 16 * a.shift(4) + 15 * a.shift(
5) + 14 * a.shift(6)
+ 13 * a.shift(7) + 12 * a.shift(8) + 11 * a.shift(9) + 10 * a.shift(10) + 9 * a.shift(
11) + 8 * a.shift(
12) + 7 * a.shift(13) + 6 * a.shift(14) + 5 * a.shift(15) + 4 * a.shift(16) + 3 * a.shift(
17) + 2 * a.shift(18) + a.shift(20)
) / 210
new_df["d"] = ta_func.ma(new_df["b"], m)
return new_df
def BBIBOLL(df, n, m):
"""多空布林线"""
new_df = pd.DataFrame()
new_df["bbiboll"] = (ta_func.ma(df["close"], 3) + ta_func.ma(df["close"], 6) + ta_func.ma(df["close"],
12) + ta_func.ma(
df["close"], 24)) / 4
new_df["upr"] = new_df["bbiboll"] + m * new_df["bbiboll"].rolling(n).std()
new_df["dwn"] = new_df["bbiboll"] - m * new_df["bbiboll"].rolling(n).std()
return new_df
def ADTM(df, n, m):
"""动态买卖气指标"""
new_df = pd.DataFrame()
dtm = np.where(df["open"] < df["open"].shift(1), 0,
np.where(df["high"] - df["open"] > df["open"] - df["open"].shift(1), df["high"] - df["open"],
df["open"] - df["open"].shift(1)))
dbm = np.where(df["open"] >= df["open"].shift(1), 0,
np.where(df["open"] - df["low"] > df["open"] - df["open"].shift(1), df["open"] - df["low"],
df["open"] - df["open"].shift(1)))
stm = pd.Series(dtm).rolling(n).sum()
sbm = pd.Series(dbm).rolling(n).sum()
new_df["adtm"] = np.where(stm > sbm, (stm - sbm) / stm, np.where(stm == sbm, 0, (stm - sbm) / sbm))
new_df["adtmma"] = ta_func.ma(new_df["adtm"], m)
return new_df
def B3612(df):
"""三减六日乖离率"""
new_df = pd.DataFrame()
new_df["b36"] = ta_func.ma(df["close"], 3) - ta_func.ma(df["close"], 6)
new_df["b612"] = ta_func.ma(df["close"], 6) - ta_func.ma(df["close"], 12)
return new_df
def DBCD(df, n, m, t):
"""异同离差乖离率"""
new_df = pd.DataFrame()
bias = (df["close"] - ta_func.ma(df["close"], n)) / ta_func.ma(df["close"], n)
dif = bias - bias.shift(m)
new_df["dbcd"] = ta_func.sma(dif, t, 1)
new_df["mm"] = ta_func.ma(new_df["dbcd"], 5)
return new_df
def DDI(df, n, n1, m, m1):
"""方向标准离差指数"""
new_df = pd.DataFrame()
tr = np.where(np.absolute(df["high"] - df["high"].shift(1)) > np.absolute(df["low"] - df["low"].shift(1)),
np.absolute(df["high"] - df["high"].shift(1)), np.absolute(df["low"] - df["low"].shift(1)))
dmz = np.where((df["high"] + df["low"]) <= (df["high"].shift(1) + df["low"].shift(1)), 0, tr)
dmf = np.where((df["high"] + df["low"]) >= (df["high"].shift(1) + df["low"].shift(1)), 0, tr)
diz = pd.Series(dmz).rolling(n).sum() / (pd.Series(dmz).rolling(n).sum() + | pd.Series(dmf) | pandas.Series |
#!/usr/bin/env python3.5
""" Predict GE using trained GNN model """
import argparse
import subprocess
import os, sys
import numpy as np
import pandas as pd
_script_dir = os.path.dirname(os.path.realpath(__file__))
def get_arg_parser():
""" Build command line parser
Returns:
command line parser
"""
parser = argparse.ArgumentParser(description='Predict Gene Expression using a trained GNN model.')
parser.add_argument('--input', metavar='gnn_input.csv', type=str,
required=True, dest="gnn_input",
help='Knockout info and Master Regulator expressions.')
parser.add_argument('--output', metavar='gnn_pred.csv', type=str,
required=True, dest="output_filename",
help='Predicted gene expression values.')
parser.add_argument('--load-model-dir', metavar='./model_dir', type=str,
required=False, dest="model_dir", default="./model_dir",
help='Directory to load trained GNN model.')
return parser
def get_ko_vector(ko_str, nmr_list):
ko_vector = np.ones(len(nmr_list))
    if type(ko_str) is str and len(ko_str) > 0:
ko_str_parts = ko_str.split("&")
for gname in ko_str_parts:
if gname in nmr_list:
gidx = nmr_list.index(gname)
ko_vector[gidx] = 0
return ko_vector
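# Illustrative example (hypothetical gene names): with nmr_list = ["geneA", "geneB", "geneC"],
# get_ko_vector("geneA&geneC", nmr_list) returns array([0., 1., 0.]) -- knocked-out regulators
# are zeroed, all other non-master regulators stay 1.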
def get_ko_binary_df(ko_df, nmr_list):
def get_ko(x):
return get_ko_vector(x, nmr_list)
ko_data = ko_df.apply(get_ko)
df = pd.DataFrame.from_records(ko_data, columns=nmr_list)
return df
def get_mr_nmr(dep_filename):
mr_list = []
nmr_list = []
with open(dep_filename, "r") as f:
line = f.readline()
while line:
parts = line.strip().split(":")
if (len(parts[1]) < 1):
mr_list.append(parts[0])
else:
nmr_list.append(parts[0])
line = f.readline()
return mr_list, nmr_list
def prep_data(input_data_filename, model_dir):
df = | pd.read_csv(input_data_filename) | pandas.read_csv |
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['<NAME>']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
    def test_tfidf_dtype_bad_option_value(self):
        """Tests that bad option values for tfidf_matrix_dtype are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
        df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_get_matches_two_dataframes(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2).fit()
left_side = ['foo', 'bar']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_single(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
sg = StringGrouper(test_series_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_index = [0, 3, 1, 2, 0, 3]
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_1_series_1_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2', 'A3'])
sg = StringGrouper(test_series_1, master_id=test_series_id_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_side_id = ['A0', 'A3', 'A1', 'A2', 'A0', 'A3']
left_index = [0, 3, 1, 2, 0, 3]
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side_id = ['A0', 'A0', 'A1', 'A2', 'A3', 'A3']
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_2_series_2_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
sg = StringGrouper(test_series_1, test_series_2, duplicates_id=test_series_id_2,
master_id=test_series_id_1).fit()
left_side = ['foo', 'bar']
left_side_id = ['A0', 'A1']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_side_id = ['B0', 'B1']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_raises_exception_if_unexpected_options_given(self):
# When the input id data does not correspond with its string data:
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
bad_test_series_id_1 = pd.Series(['A0', 'A1'])
good_test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = | pd.Series(['foo', 'bar', 'bop']) | pandas.Series |
# Routine that preprocesses and transforms the data for time series
# <NAME>
# <NAME>
# ------------------------------------------------------------------
# Input: 2 or more asynchronous .csv files.
# Output: hdf5 binary file with chunks of synchronized data
#
# Each csv file must have a time column, whose sampling may be
# arbitrary and asynchronous with respect to the other files.
# There may be gaps in the data.
#
# The input parameter is a json file such as:
# config/sma_uai_alldata.json
# This file contains information about the input/output files,
# the columns and the conversion variables.
#
# The program takes the files and looks for continuous blocks of data
# in which no column has a gap larger than
# "max_delta_between_blocks_sec" seconds; gaps smaller than that
# are interpolated over.
# Each continuous block of data, or "chunk", must last at least
# "min_chunk_duration_sec" seconds or it is discarded.
# The data are then synchronized onto a uniform sampling grid
# given by "sample_spacing".
#
# As a result, multiple chunks of continuous data are written to the
# hdf5 file. If "min_chunk_duration_sec" is very large and only one or
# very few chunks are produced, that is inconvenient, because building
# the train/testing sets requires enough chunks to assign to each set,
# e.g. 20 equally sized chunks imply a train/testing split whose ratio
# can be off by at least 5% with respect to the defined ratio.
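#
# For illustration only, a config file of the kind described above could look
# roughly like this (a hypothetical sketch: apart from
# "max_delta_between_blocks_sec", "min_chunk_duration_sec" and "sample_spacing",
# the key names and values below are assumptions, not the actual contents of
# config/sma_uai_alldata.json):
#
# {
#     "input_files": ["sensor_a.csv", "sensor_b.csv"],
#     "output_file": "synced_chunks.h5",
#     "date_column": "timestamp",
#     "max_delta_between_blocks_sec": 600,
#     "min_chunk_duration_sec": 86400,
#     "sample_spacing": "60s"
# }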
import os
import sys
import json
import pandas
import numpy
# Looks for continuous blocks of time in which no time gap larger than
# maxStep occurs and which last at least minDuration.
def get_cont_chunks(df, date_col, maxStep, minDuration):
timeArray = df[date_col]
# indexes where a discontinuity in time occurs
    # note: this only works with numpy 1.20+
idxs = numpy.where(numpy.diff(timeArray) > maxStep)[0]
if len(idxs) == 0:
# trick: add a last time sample with a huge jump to make the routine
# consider a contiguous block as a single block of data
        timeArray_hack = numpy.concatenate([timeArray, [1e45]])
        idxs = numpy.where(numpy.diff(timeArray_hack) > maxStep)[0]
return [0, timeArray.size]
print("found {} discontinuities".format(len(idxs)))
leftIdx = 0
rightIdx = -1
interval_list = list()
for idx in idxs:
rightIdx = idx
duration = timeArray[rightIdx] - timeArray[leftIdx]
if duration > minDuration:
interv = | pandas.Interval(timeArray[leftIdx], timeArray[rightIdx]) | pandas.Interval |
import pathlib
import pytest
import pandas as pd
import numpy as np
import gradelib
EXAMPLES_DIRECTORY = pathlib.Path(__file__).parent / "examples"
GRADESCOPE_EXAMPLE = gradelib.Gradebook.from_gradescope(
EXAMPLES_DIRECTORY / "gradescope.csv"
)
CANVAS_EXAMPLE = gradelib.Gradebook.from_canvas(EXAMPLES_DIRECTORY / "canvas.csv")
# the canvas example has Lab 01, which is also in Gradescope. Let's remove it
CANVAS_WITHOUT_LAB_EXAMPLE = gradelib.Gradebook(
points=CANVAS_EXAMPLE.points.drop(columns="lab 01"),
maximums=CANVAS_EXAMPLE.maximums.drop(index="lab 01"),
late=CANVAS_EXAMPLE.late.drop(columns="lab 01"),
dropped=CANVAS_EXAMPLE.dropped.drop(columns="lab 01"),
)
# given
ROSTER = gradelib.read_egrades_roster(EXAMPLES_DIRECTORY / "egrades.csv")
def assert_gradebook_is_sound(gradebook):
assert gradebook.points.shape == gradebook.dropped.shape == gradebook.late.shape
assert (gradebook.points.columns == gradebook.dropped.columns).all()
assert (gradebook.points.columns == gradebook.late.columns).all()
assert (gradebook.points.index == gradebook.dropped.index).all()
assert (gradebook.points.index == gradebook.late.index).all()
assert (gradebook.points.columns == gradebook.maximums.index).all()
# assignments property
# -----------------------------------------------------------------------------
def test_assignments_are_produced_in_order():
assert list(GRADESCOPE_EXAMPLE.assignments) == list(
GRADESCOPE_EXAMPLE.points.columns
)
# keep_pids()
# -----------------------------------------------------------------------------
def test_keep_pids():
# when
actual = GRADESCOPE_EXAMPLE.keep_pids(ROSTER.index)
# then
assert len(actual.pids) == 3
assert_gradebook_is_sound(actual)
def test_keep_pids_raises_if_pid_does_not_exist():
# given
pids = ["A12345678", "ADNEDNE00"]
# when
with pytest.raises(KeyError):
actual = GRADESCOPE_EXAMPLE.keep_pids(pids)
# keep_assignments() and remove_assignments()
# -----------------------------------------------------------------------------
def test_keep_assignments():
# when
actual = GRADESCOPE_EXAMPLE.keep_assignments(["homework 01", "homework 02"])
# then
assert set(actual.assignments) == {"homework 01", "homework 02"}
assert_gradebook_is_sound(actual)
def test_keep_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.keep_assignments(assignments)
def test_remove_assignments():
# when
actual = GRADESCOPE_EXAMPLE.remove_assignments(
GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
)
# then
assert set(actual.assignments) == {
"homework 01",
"homework 02",
"homework 03",
"homework 04",
"homework 05",
"homework 06",
"homework 07",
"project 01",
"project 02",
}
assert_gradebook_is_sound(actual)
def test_remove_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.remove_assignments(assignments)
# combine()
# -----------------------------------------------------------------------------
def test_combine_with_keep_pids():
# when
combined = gradelib.Gradebook.combine(
[GRADESCOPE_EXAMPLE, CANVAS_WITHOUT_LAB_EXAMPLE], keep_pids=ROSTER.index
)
# then
assert "homework 01" in combined.assignments
assert "midterm exam" in combined.assignments
assert_gradebook_is_sound(combined)
def test_combine_raises_if_duplicate_assignments():
# the canvas example and the gradescope example both have lab 01.
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine([GRADESCOPE_EXAMPLE, CANVAS_EXAMPLE])
def test_combine_raises_if_indices_do_not_match():
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine(
[CANVAS_WITHOUT_LAB_EXAMPLE, GRADESCOPE_EXAMPLE]
)
# number_of_lates()
# -----------------------------------------------------------------------------
def test_number_of_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=labs)
# then
assert list(actual) == [1, 4, 2, 2]
def test_number_of_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=[])
def test_number_of_lates_with_no_assignment_list_uses_all_assignments():
# when
actual = GRADESCOPE_EXAMPLE.number_of_lates()
# then
assert list(actual) == [1, 5, 2, 2]
# forgive_lates()
# -----------------------------------------------------------------------------
def test_forgive_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [0, 1, 0, 0]
assert_gradebook_is_sound(actual)
def test_forgive_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=[])
def test_forgive_lates_forgives_the_first_n_lates():
# by "first", we mean in the order specified by the `within` argument
# student A10000000 had late lab 01, 02, 03, and 07
assignments = ["lab 02", "lab 07", "lab 01", "lab 03"]
# when
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=2, within=assignments)
# then
assert not actual.late.loc["A10000000", "lab 02"]
assert not actual.late.loc["A10000000", "lab 07"]
assert actual.late.loc["A10000000", "lab 01"]
assert actual.late.loc["A10000000", "lab 03"]
def test_forgive_lates_does_not_forgive_dropped():
# given
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
dropped = GRADESCOPE_EXAMPLE.dropped.copy()
dropped.iloc[:, :] = True
example = gradelib.Gradebook(
points=GRADESCOPE_EXAMPLE.points,
maximums=GRADESCOPE_EXAMPLE.maximums,
late=GRADESCOPE_EXAMPLE.late,
dropped=dropped,
)
# when
actual = example.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [1, 4, 2, 2]
assert_gradebook_is_sound(actual)
# drop_lowest()
# -----------------------------------------------------------------------------
def test_drop_lowest_on_simple_example_1():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# if we are dropping 1 HW, the right strategy is to drop the 50 point HW
# for A1 and to drop the 100 point homework for A2
# when
actual = gradebook.drop_lowest(1, within=homeworks)
# then
assert actual.dropped.iloc[0, 1]
assert actual.dropped.iloc[1, 2]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_on_simple_example_2():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
    # if we are dropping 2 HWs, the right strategy is to keep the 100 point HW
    # for A1 and to keep the 2 point homework for A2, dropping the other two
# when
actual = gradebook.drop_lowest(2, within=homeworks)
# then
assert not actual.dropped.iloc[0, 2]
assert not actual.dropped.iloc[1, 0]
assert list(actual.dropped.sum(axis=1)) == [2, 2]
assert_gradebook_is_sound(actual)
def test_drop_lowest_counts_lates_as_zeros():
# given
columns = ["hw01", "hw02"]
p1 = pd.Series(data=[10, 5], index=columns, name="A1")
p2 = pd.Series(data=[10, 10], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([10, 10], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.late.iloc[0, 0] = True
# since A1's perfect homework is late, it should count as zero and be
# dropped
# when
actual = gradebook.drop_lowest(1)
# then
assert actual.dropped.iloc[0, 0]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_ignores_assignments_already_dropped():
# given
columns = ["hw01", "hw02", "hw03", "hw04"]
p1 = pd.Series(data=[9, 0, 7, 0], index=columns, name="A1")
p2 = pd.Series(data=[10, 10, 10, 10], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([10, 10, 10, 10], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
gradebook.dropped.loc["A1", "hw02"] = True
gradebook.dropped.loc["A1", "hw04"] = True
    # since A1's zero-score homeworks (hw02 and hw04) are already dropped,
    # dropping one more homework will drop hw03, the lowest of those remaining
# when
actual = gradebook.drop_lowest(1)
# then
assert actual.dropped.loc["A1", "hw04"]
assert actual.dropped.loc["A1", "hw02"]
assert actual.dropped.loc["A1", "hw03"]
assert list(actual.dropped.sum(axis=1)) == [3, 1]
assert_gradebook_is_sound(actual)
# give_equal_weights()
# -----------------------------------------------------------------------------
def test_give_equal_weights_on_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.give_equal_weights(within=homeworks)
# then
assert actual.maximums.loc["hw01"] == 1
assert actual.maximums.loc["hw02"] == 1
assert actual.maximums.loc["hw03"] == 1
assert actual.maximums.loc["lab01"] == 20
assert actual.points.loc["A1", "hw01"] == 1 / 2
assert actual.points.loc["A1", "hw02"] == 30 / 50
# score()
# -----------------------------------------------------------------------------
def test_score_on_simple_example():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# when
actual = gradebook.score(homeworks)
# then
assert np.allclose(actual.values, [121 / 152, 24 / 152], atol=1e-6)
def test_score_counts_lates_as_zero():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = | pd.Series(data=[2, 7, 15, 20], index=columns, name="A2") | pandas.Series |
from copy import deepcopy
import inspect
import pydoc
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import (
async_mark,
skip_if_no,
)
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
timedelta_range,
)
import pandas._testing as tm
class TestDataFrameMisc:
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
colname = "\ud83d"
df = DataFrame({colname: []})
# this should not crash
assert colname not in dir(df)
assert df.columns[0] == colname
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_empty(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
tm.assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import unittest
from unittest import mock
import pandas as pd
from matplotlib import pyplot as plt
import dataprofiler as dp
from dataprofiler.profilers import IntColumn
from dataprofiler.reports import graphs
@mock.patch("dataprofiler.reports.graphs.plt.show")
@mock.patch("dataprofiler.reports.graphs.plot_col_histogram")
class TestPlotHistograms(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = [[1, 'a', 1.0, '1/2/2021'],
[None, 'b', None, '1/2/2020'],
[3, 'c', 3.5, '1/2/2022'],
[4, 'd', 4.5, '1/2/2023'],
[5, 'e', 6.0, '5/2/2020'],
[None, 'f', None, '1/5/2020'],
[1, 'g', 1.0, '2/5/2020'],
[None, 1, 10.0, '3/5/2020']]
cls.options = dp.ProfilerOptions()
cls.options.set({"data_labeler.is_enabled": False})
cls.profiler = dp.StructuredProfiler(cls.data, options=cls.options)
def test_no_columns_specified(self, plot_col_mock, plt_mock):
graphsplot = graphs.plot_histograms(self.profiler)
self.assertEqual(2, plot_col_mock.call_count)
# grabs the first argument passed into the plot col call and validates
# it is the column profiler and its name matches what we expect it to
self.assertEqual(0, plot_col_mock.call_args_list[0][0][0].name)
# grabs the second argument passed into the plot col call and validates
# it is the column profiler and its name matches what we expect it to
self.assertEqual(2, plot_col_mock.call_args_list[1][0][0].name)
self.assertIsInstance(graphsplot, plt.Figure)
def test_normal(self, plot_col_mock, plt_mock):
graphsplot = graphs.plot_histograms(self.profiler, [2])
self.assertEqual(1, plot_col_mock.call_count)
self.assertEqual(2, plot_col_mock.call_args_list[0][0][0].name)
self.assertIsInstance(graphsplot, plt.Figure)
def test_bad_column_name(self, plot_col_mock, plt_mock):
with self.assertRaisesRegex(ValueError,
"Column \"a\" is not found as a profiler "
"column"):
graphs.plot_histograms(self.profiler, [0, "a"])
def test_no_column_plottable(self, plot_col_mock, plt_mock):
with self.assertWarnsRegex(Warning, "No plots were constructed"
" because no int or float columns "
"were found in columns"):
graphs.plot_histograms(self.profiler, [1, 3])
def test_empty_profiler(self, plot_col_mock, plt_mock):
with self.assertWarnsRegex(Warning, "No plots were constructed"
" because no int or float columns "
"were found in columns"):
graphs.plot_histograms(
dp.StructuredProfiler(data=None, options=self.options))
@mock.patch("dataprofiler.reports.graphs.plt.show")
class TestPlotColHistogram(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = pd.Series([1, 2, 4, 2, 5, 35, 32], dtype=str)
cls.profiler = IntColumn('test')
cls.profiler.update(cls.data)
def test_normal(self, plt_mock):
self.assertIsInstance(graphs.plot_col_histogram(self.profiler),
plt.Axes)
def test_empty_data(self, plt_mock):
data = | pd.Series([], dtype=str) | pandas.Series |
import numpy as np
import pandas as pd
import pickle as pkl
import tensorflow as tf
from optparse import OptionParser
import config
from inputs.data import load_question, load_train, load_test
from inputs.data import init_embedding_matrix
from models.model_library import get_model
from utils import log_utils, os_utils, time_utils
params = {
"model_name": "semantic_matching",
"offline_model_dir": "./weights/semantic_matching",
"summary_dir": "../summary",
"construct_neg": False,
"augmentation_init_permutation": 0.5,
"augmentation_min_permutation": 0.01,
"augmentation_permutation_decay_steps": 2000,
"augmentation_permutation_decay_rate": 0.975,
"augmentation_init_dropout": 0.5,
"augmentation_min_dropout": 0.01,
"augmentation_dropout_decay_steps": 2000,
"augmentation_dropout_decay_rate": 0.975,
"use_features": False,
"num_features": 1,
"n_runs": 10,
"batch_size": 128,
"epoch": 50,
"max_batch": -1,
"l2_lambda": 0.000,
# embedding
"embedding_dropout": 0.3,
"embedding_dim_word": init_embedding_matrix["word"].shape[1],
"embedding_dim_char": init_embedding_matrix["char"].shape[1],
"embedding_dim": init_embedding_matrix["word"].shape[1],
"embedding_dim_compressed": 32,
"embedding_trainable": True,
"embedding_mask_zero": True,
"max_num_word": init_embedding_matrix["word"].shape[0],
"max_num_char": init_embedding_matrix["char"].shape[0],
"threshold": 0.217277,
"calibration": False,
"max_seq_len_word": 12,
"max_seq_len_char": 20,
"pad_sequences_padding": "post",
"pad_sequences_truncating": "post",
# optimization
"optimizer_type": "lazyadam",
"init_lr": 0.001,
"beta1": 0.9,
"beta2": 0.999,
"decay_steps": 2000,
"decay_rate": 0.95,
"schedule_decay": 0.004,
"random_seed": 2018,
"eval_every_num_update": 5000,
# semantic feature layer
"encode_method": "textcnn",
"attend_method": ["ave", "max", "min", "self-scalar-attention"],
"attention_dim": 64,
"attention_num_heads": 1,
# cnn
"cnn_num_layers": 1,
"cnn_num_filters": 32,
"cnn_filter_sizes": [1, 2, 3],
"cnn_timedistributed": False,
"cnn_activation": tf.nn.relu,
"cnn_gated_conv": False,
"cnn_residual": False,
"rnn_num_units": 32,
"rnn_cell_type": "gru",
"rnn_num_layers": 1,
# fc block
"fc_type": "fc",
"fc_hidden_units": [64*4, 64*2, 64],
"fc_dropouts": [0, 0, 0],
# True: cosine(l1, l2), sum(abs(l1 - l2))
# False: l1 * l2, abs(l1 - l2)
"similarity_aggregation": False,
# match pyramid
"mp_num_filters": [8, 16],
"mp_filter_sizes": [5, 3],
"mp_activation": tf.nn.relu,
"mp_dynamic_pooling": False,
"mp_pool_sizes_word": [6, 3],
"mp_pool_sizes_char": [10, 5],
# bcnn
"bcnn_num_layers": 2,
"bcnn_num_filters": 16,
"bcnn_filter_size": 3,
"bcnn_activation": tf.nn.tanh, # tf.nn.relu with euclidean/euclidean_exp produce nan
"bcnn_match_score_type": "cosine",
"bcnn_mp_att_pooling": False,
"bcnn_mp_num_filters": [8, 16],
"bcnn_mp_filter_sizes": [5, 3],
"bcnn_mp_activation": tf.nn.relu,
"bcnn_mp_dynamic_pooling": False,
"bcnn_mp_pool_sizes_word": [6, 3],
"bcnn_mp_pool_sizes_char": [10, 5],
# final layer
"final_dropout": 0.3,
}
def get_model_data(df, features, params):
X = {
"q1": df.q1.values,
"q2": df.q2.values,
"label": df.label.values,
}
if params["use_features"]:
X.update({
"features": features,
})
params["num_features"] = X["features"].shape[1]
return X
def downsample(df):
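    """Keep every positive example and randomly subsample the negatives so that
    positives make up roughly config.POS_RATIO_OFFLINE of the returned frame."""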
# downsample negative
num_pos = np.sum(df.label)
num_neg = int((1. / config.POS_RATIO_OFFLINE - 1.) * num_pos)
idx_pos = np.where(df.label == 1)[0]
idx_neg = np.where(df.label == 0)[0]
np.random.shuffle(idx_neg)
idx = np.hstack([idx_pos, idx_neg[:num_neg]])
return df.loc[idx]
def get_train_valid_test_data(augmentation=False):
# load data
Q = load_question(params)
dfTrain = load_train()
dfTest = load_test()
# train_features = load_feat("train")
# test_features = load_feat("test")
# params["num_features"] = train_features.shape[1]
# load split
with open(config.SPLIT_FILE, "rb") as f:
train_idx, valid_idx = pkl.load(f)
# validation
if augmentation:
dfDev = | pd.read_csv(config.DATA_DIR + "/" + "dev_aug.csv") | pandas.read_csv |
import argparse
import numpy as np
import pandas as pd
from bashplotlib.histogram import plot_hist
from scipy.stats import gamma, beta, norm, randint, bernoulli
from eemeter.location import zipcode_to_station
from eemeter.weather import TMY3WeatherSource
from eemeter.weather import GSODWeatherSource
from eemeter.models.temperature_sensitivity import AverageDailyTemperatureSensitivityModel
from eemeter.meter import AnnualizedUsageMeter
from eemeter.location import Location
from eemeter.generator import generate_monthly_billing_datetimes
from eemeter.evaluation import Period
from eemeter.project import Project
from eemeter.consumption import ConsumptionData
from datetime import datetime, date, timedelta
import uuid
try:
import configparser
except ImportError: # python 2
from backports import configparser
TEMPERATURE_UNIT_STR = "degF"
BINCOUNT = 79
def plot_gamma(k, theta):
sample = gamma.rvs(k, scale=theta, size=10000)
plot_hist(sample, bincount=BINCOUNT, height=10, xlab=True)
def plot_beta(a, b, max=1.0):
sample = beta.rvs(a, b, size=10000) * max
plot_hist(sample, bincount=BINCOUNT, height=10, xlab=True)
def plot_norm(mean, variation):
sample = norm.rvs(mean, variation, size=10000)
plot_hist(sample, bincount=BINCOUNT, height=10, xlab=True)
def get_weather_sources(station):
weather_source = GSODWeatherSource(station, 2007, 2015)
weather_normal_source = TMY3WeatherSource(station)
if weather_source.data == {} or weather_normal_source.data == {}:
        message = "Insufficient weather data for station {}. Please choose " \
                  "a different weather station (by selecting a different " \
                  "zipcode).".format(station)
        raise ValueError(message)
return weather_source, weather_normal_source
def find_best_params(usage_pre_retrofit_gas, usage_pre_retrofit_electricity,
usage_post_retrofit_gas, usage_post_retrofit_electricity,
weather_normal_source):
model_e = AverageDailyTemperatureSensitivityModel(heating=True, cooling=True)
model_g = AverageDailyTemperatureSensitivityModel(heating=True, cooling=False)
# find target model params
start_params_e = model_e.param_type({
'base_daily_consumption': usage_pre_retrofit_electricity / 500,
'heating_balance_temperature': 62,
'heating_slope': usage_pre_retrofit_electricity / 6000,
'cooling_balance_temperature': 68,
'cooling_slope': usage_pre_retrofit_electricity / 6000,
})
start_params_g = model_g.param_type({
'base_daily_consumption': usage_pre_retrofit_gas / 700,
'heating_balance_temperature': 62,
'heating_slope': usage_pre_retrofit_gas / 6000,
})
# params and scale factors
params_to_change_e = [
('base_daily_consumption', 2),
('heating_slope', .3),
('cooling_slope', .3)]
params_to_change_g = [
('base_daily_consumption', 2),
('heating_slope', .3)]
params_e_pre, ann_usage_e_pre = find_best_annualized_usage_params(
usage_pre_retrofit_electricity, model_e, start_params_e, params_to_change_e, weather_normal_source)
params_e_post, ann_usage_e_post = find_best_annualized_usage_params(
usage_post_retrofit_electricity, model_e, params_e_pre, params_to_change_e, weather_normal_source)
params_g_pre, ann_usage_g_pre = find_best_annualized_usage_params(
usage_pre_retrofit_gas, model_g, start_params_g, params_to_change_g, weather_normal_source)
params_g_post, ann_usage_g_post = find_best_annualized_usage_params(
usage_post_retrofit_gas, model_g, params_g_pre, params_to_change_g, weather_normal_source)
return params_e_pre, params_e_post, params_g_pre, params_g_post, \
ann_usage_e_pre, ann_usage_e_post, ann_usage_g_pre, ann_usage_g_post
def find_best_annualized_usage_params(target_annualized_usage, model,
start_params, params_to_change, weather_normal_source, n_guesses=100):
best_params = start_params
meter = AnnualizedUsageMeter(model=model, temperature_unit_str=TEMPERATURE_UNIT_STR)
best_result = meter.evaluate_raw(model_params=best_params, weather_normal_source=weather_normal_source)
best_ann_usage = best_result["annualized_usage"][0]
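    # Simple stochastic search: each iteration perturbs the best parameters so
    # far with Gaussian noise whose spread shrinks as the annualized usage gets
    # closer to the target, and keeps the new guess only if it is an improvement.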
for n in range(n_guesses):
resolution = abs((target_annualized_usage - best_ann_usage) / target_annualized_usage)
param_dict = best_params.to_dict()
for param_name,scale_factor in params_to_change:
current_value = param_dict[param_name]
current_value = norm.rvs(param_dict[param_name], resolution * scale_factor)
while current_value < 0:
current_value = norm.rvs(param_dict[param_name], resolution * scale_factor)
param_dict[param_name] = current_value
model_params = model.param_type(param_dict)
result = meter.evaluate_raw(model_params=model_params, weather_normal_source=weather_normal_source)
ann_usage = result["annualized_usage"][0]
if abs(target_annualized_usage - ann_usage) < abs(target_annualized_usage - best_ann_usage):
diff = abs(target_annualized_usage - best_ann_usage)
best_params = model_params
best_ann_usage = ann_usage
return best_params, best_ann_usage
def create_project(params_e_pre, params_e_post, params_g_pre, params_g_post,
baseline_period_start_date, baseline_period_end_date,
reporting_period_start_date, reporting_period_end_date,
has_electricity, has_gas, weather_source, zipcode):
model_e = AverageDailyTemperatureSensitivityModel(heating=True, cooling=True)
model_g = AverageDailyTemperatureSensitivityModel(heating=True, cooling=False)
# generate consumption
baseline_period = Period(baseline_period_start_date, reporting_period_start_date)
datetimes_pre = generate_monthly_billing_datetimes(baseline_period, dist=randint(29,31))
reporting_period = Period(datetimes_pre[-1], reporting_period_end_date)
datetimes_post = generate_monthly_billing_datetimes(reporting_period, dist=randint(29,31))
location = Location(zipcode=zipcode)
baseline_period = Period(baseline_period_start_date, baseline_period_end_date)
reporting_period = Period(reporting_period_start_date, reporting_period_end_date)
cds = []
if has_electricity:
cd_e = generate_consumption_records(model_e, params_e_pre, params_e_post, datetimes_pre, datetimes_post, "electricity", "kWh", weather_source)
cds.append(cd_e)
if has_gas:
cd_g = generate_consumption_records(model_g, params_g_pre, params_g_post, datetimes_pre, datetimes_post, "natural_gas", "therm", weather_source)
cds.append(cd_g)
return Project(location, cds, baseline_period, reporting_period)
def generate_consumption_records(model, params_pre, params_post, datetimes_pre, datetimes_post, fuel_type, energy_unit, weather_source):
datetimes = datetimes_pre[:-1] + datetimes_post
records = [{"start": start, "end": end, "value": np.nan}
for start, end in zip(datetimes, datetimes[1:])]
cd = ConsumptionData(records, fuel_type, energy_unit, record_type="arbitrary")
periods = cd.periods()
periods_pre = periods[:len(datetimes_pre[:-1])]
periods_post = periods[len(datetimes_pre[:-1]):]
period_pre_daily_temps = weather_source.daily_temperatures(periods_pre, TEMPERATURE_UNIT_STR)
period_post_daily_temps = weather_source.daily_temperatures(periods_post, TEMPERATURE_UNIT_STR)
period_pre_average_daily_usages = model.transform(period_pre_daily_temps, params_pre)
period_post_average_daily_usages = model.transform(period_post_daily_temps, params_post)
daily_noise_dist = None
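    # Noise is disabled here; assigning a scipy.stats frozen distribution to
    # daily_noise_dist would add random daily noise to the generated usage below.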
for average_daily_usage, period in zip(period_pre_average_daily_usages, periods_pre):
n_days = period.timedelta.days
if daily_noise_dist is not None:
average_daily_usage += np.mean(daily_noise_dist.rvs(n_days))
cd.data[period.start] = average_daily_usage * n_days
for average_daily_usage, period in zip(period_post_average_daily_usages, periods_post):
n_days = period.timedelta.days
if daily_noise_dist is not None:
average_daily_usage += np.mean(daily_noise_dist.rvs(n_days))
cd.data[period.start] = average_daily_usage * n_days
return cd
def write_projects_to_csv(projects, project_csv, consumption_csv):
project_rows = []
consumption_rows = []
for project in projects:
proj = project["project"]
project_id = uuid.uuid4()
project_rows.append({
"project_id": project_id,
"baseline_period_start": proj.baseline_period.start,
"baseline_period_end": proj.baseline_period.end,
"reporting_period_start": proj.reporting_period.start,
"reporting_period_end": proj.reporting_period.end,
"latitude": proj.location.lat + (norm.rvs() * 0.01),
"longitude": proj.location.lng + (norm.rvs() * 0.01),
"zipcode": proj.location.zipcode,
"weather_station": proj.location.station,
"predicted_electricity_savings": project["predicted_electricity_savings"],
"predicted_natural_gas_savings": project["predicted_natural_gas_savings"],
"project_cost": project["project_cost"],
})
for consumption_data in proj.consumption:
for record in consumption_data.records():
consumption_rows.append({
"start": datetime.strftime(record["start"], "%Y-%m-%d"),
"end": datetime.strftime(record["end"], "%Y-%m-%d"),
"value": record["value"],
"unit_name": consumption_data.unit_name,
"fuel_type": consumption_data.fuel_type,
"project_id": project_id,
})
project_df = pd.DataFrame(project_rows)
consumption_df = | pd.DataFrame(consumption_rows) | pandas.DataFrame |
"""
Created by: <NAME>
Sep 7
IEEE Fraud Detection Model
- Add back ids
- Add V Features
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
INPUT_DIR = '../input/champs-scalar-coupling/'
FE_DIR = '../input/molecule-fe024/'
FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.1
VERBOSE = 1000
EARLY_STOPPING_ROUNDS = 500
RANDOM_STATE = 529
N_THREADS = 48
DEPTH = 7
N_FOLDS = 5
MODEL_TYPE = "catboost"
#####################
## SETUP LOGGER
#####################
def get_logger():
"""
credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
"""
os.environ["TZ"] = "US/Eastern"
time.tzset()
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
# logger.addHandler(handler)
logger.addHandler(fhandler)
return logger
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
if MODEL_TYPE == 'xgboost':
EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lgbm':
EVAL_METRIC = 'AUC'
elif MODEL_TYPE == 'catboost':
EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
field,
value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
"""
try:
df = pd.read_csv(csv_file, index_col=[0])
except FileNotFoundError:
df = pd.DataFrame()
if integer:
value = round(value)
elif digits is not None:
value = round(value, digits)
if drop_incomplete_rows:
df = df.loc[~df['AUC'].isna()]
df.loc[run_id, field] = value # Model number is index
df.to_csv(csv_file)
update_tracking(run_id, "model_number", MODEL_NUMBER, drop_incomplete_rows=True)
update_tracking(run_id, "n_estimators", N_ESTIMATORS)
update_tracking(run_id, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
update_tracking(run_id, "random_state", RANDOM_STATE)
update_tracking(run_id, "n_threads", N_THREADS)
update_tracking(run_id, "learning_rate", LEARNING_RATE)
update_tracking(run_id, "n_fold", N_FOLDS)
update_tracking(run_id, "model_type", MODEL_TYPE)
update_tracking(run_id, "eval_metric", EVAL_METRIC)
#####################
# PREPARE MODEL DATA
#####################
folds = KFold(n_splits=N_FOLDS, random_state=RANDOM_STATE)
logger.info('Loading Data...')
train_df = pd.read_parquet('../input/train.parquet')
test_df = | pd.read_parquet('../input/test.parquet') | pandas.read_parquet |
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
policy_new = contact_matrix_4prev(t, prev_schools, prev_work, prev_rest,
school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
# Loop over dimension draws
for n in range(data.shape[0]):
binomial_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = binomial_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL','rest_mean','rest_LL','rest_UL',
'home_mean','home_LL','home_UL','total_mean','total_LL','total_UL','work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL','total_mean','total_LL','total_UL']]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
for j,samples_dict in enumerate(samples_dicts):
print('\n WAVE: ' + str(j)+'\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_rest[idx,:] = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_rest_mean = np.mean(Re_rest,axis=1)
Re_rest_LL = np.quantile(Re_rest,q=0.05/2,axis=1)
Re_rest_UL = np.quantile(Re_rest,q=1-0.05/2,axis=1)
# ---------------
# Work prevention
# ---------------
print('Work\n')
data_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_work[idx,:] = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.array(samples_dict['prev_work'])
data_work[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['work'][date])*(np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_work[idx,:] = (0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0))))*np.array(samples_dict['prev_work'])
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_work_mean = np.mean(Re_work,axis=1)
Re_work_LL = np.quantile(Re_work, q=0.05/2, axis=1)
Re_work_UL = np.quantile(Re_work, q=1-0.05/2, axis=1)
# ----------------
# Home prevention
# ----------------
print('Home\n')
data_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
Re_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.ones(len(samples_dict['prev_home']))
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
new = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
data_home[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(np.sum(Nc_home,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
new_contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_home_mean = np.mean(Re_home,axis=1)
Re_home_LL = np.quantile(Re_home, q=0.05/2, axis=1)
Re_home_UL = np.quantile(Re_home, q=1-0.05/2, axis=1)
# ------------------
# School prevention
# ------------------
if j == 0:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-09-01'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1 * (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work']) # This is wrong, but is never used
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif j == 1:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_schools'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-11-16'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-11-16') < date <= pd.to_datetime('2020-12-18'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif | pd.to_datetime('2020-12-18') | pandas.to_datetime |
from datetime import datetime
import gzip
import joblib
import linecache
import numpy as np
import os
import pandas as pd
import pyBigWig
import time
import torch
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
def extract_weights(net):
    '''
    Flatten every named parameter of `net` into a long-format DataFrame
    with columns "Name" (parameter name) and "Values" (flattened weights).
    '''
list_dicts = []
for param_name, param_val in net.named_parameters():
dict_weights = {}
dict_weights["Values"] = param_val.detach().cpu().numpy().reshape(-1)
dict_weights["Name"] = [param_name] * len(dict_weights["Values"])
addf = pd.DataFrame(dict_weights)
list_dicts.append(addf)
outdf = pd.concat(list_dicts)
return outdf
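# A minimal usage sketch for extract_weights (hypothetical toy module, not
# part of the training pipeline itself):
#
#     toy_net = torch.nn.Linear(4, 2)
#     weight_df = extract_weights(toy_net)
#     print(weight_df.groupby("Name")["Values"].describe())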
def get_bce(response_ar, pred_vals, regions, resp_cutoff=0, bce=True):
    '''
    Numpy/CPU evaluation loss. For each region, form all pairwise
    differences of responses and of predictions, keep the upper-triangle
    pairs whose absolute response difference exceeds resp_cutoff, and
    score them with BCE between the sigmoid of the prediction differences
    and the binarized (sign of the) response differences, or with MSE on
    the raw differences when bce is False. Returns the mean over regions
    that contributed at least one pair.
    '''
response_ar = response_ar.reshape(response_ar.shape[0], 1)
pred_vals = pred_vals.reshape(pred_vals.shape[0], 1)
# response_ar = response_ar / max(response_ar)
# pred_vals = pred_vals / max(pred_vals)
all_ce_losses = 0
len_used_regs = 0
loss = torch.nn.MSELoss()
if bce:
loss = torch.nn.BCELoss()
for region in np.unique(regions):
idx_reg = np.where(regions == region)[0]
resp_ar = np.sum(
response_ar[None, idx_reg] - response_ar[idx_reg, None],
axis=-1)
pred_ar = np.sum(
pred_vals[None, idx_reg] - pred_vals[idx_reg, None], axis=-1)
pred_tensor = torch.triu(
torch.from_numpy(pred_ar), diagonal=1)
resp_tensor = torch.from_numpy(
resp_ar[np.triu_indices(resp_ar.shape[0])]).view(-1, 1)
pred_tensor = pred_tensor[
np.triu_indices(pred_tensor.shape[0])].view(-1, 1)
idx_eval = np.where(
abs(resp_tensor.cpu().detach().numpy()) > resp_cutoff)[0]
if len(idx_eval) > 0:
len_used_regs += 1
if bce:
resp_tensor = resp_tensor[idx_eval]
resp_tensor[resp_tensor > resp_cutoff] = 1
resp_tensor[resp_tensor < (-1 * resp_cutoff)] = 0
pred_tensor = torch.sigmoid(pred_tensor[idx_eval])
else:
resp_tensor = resp_tensor[idx_eval]
pred_tensor = pred_tensor[idx_eval]
try:
ce_loss = loss(pred_tensor, resp_tensor)
except Exception:
try:
ce_loss = loss(pred_tensor, resp_tensor.double())
except Exception:
raise ValueError("Failed at BCE")
all_ce_losses += ce_loss
if len_used_regs > 0:
loss = all_ce_losses.cpu().detach().numpy() / len_used_regs
else:
        loss = np.nan  # np.nan is a constant, not a callable
return loss
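# A minimal sketch of calling get_bce (hypothetical arrays; region labels are
# arbitrary strings aligned with the examples):
#
#     resp = np.random.rand(12)
#     preds = np.random.rand(12)
#     regs = np.repeat(np.array(["regionA", "regionB"]), 6)
#     pairwise_loss = get_bce(resp, preds, regs, resp_cutoff=0.05, bce=True)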
def get_ce_loss(criterion_direction, response_tensor, model_init, regions,
                resp_cutoff=0, bce=True):
    '''
    Differentiable (torch) counterpart of get_bce: builds the same
    region-wise pairwise-difference targets from response_tensor and the
    model output model_init, and scores them with criterion_direction so
    the result can be back-propagated during training.
    '''
pred_vals = model_init.detach().cpu().numpy()
all_ce_losses = 0
len_used_regs = 0
for region in np.unique(regions):
idx_reg = np.where(regions == region)
resp_ar = np.sum(
response_tensor[None, idx_reg] - response_tensor[idx_reg, None],
axis=-1)[0]
pred_ar = np.sum(
pred_vals[None, idx_reg] - pred_vals[idx_reg, None], axis=-1)[0]
pred_tensor = torch.triu(
torch.from_numpy(pred_ar), diagonal=1)
resp_tensor = torch.from_numpy(
resp_ar[np.triu_indices(resp_ar.shape[0])]).view(-1, 1).to(device)
pred_tensor = pred_tensor[
np.triu_indices(pred_tensor.shape[0])].view(-1, 1).to(device)
idx_eval = np.where(
abs(resp_tensor.cpu().detach().numpy()) > resp_cutoff)[0]
if len(idx_eval) > 0:
len_used_regs += 1
if not bce:
resp_tensor = resp_tensor[idx_eval]
pred_tensor = pred_tensor[idx_eval]
else:
resp_tensor = resp_tensor[idx_eval]
resp_tensor[resp_tensor > resp_cutoff] = 1
resp_tensor[resp_tensor < (-1 * resp_cutoff)] = 0
pred_tensor = torch.sigmoid(pred_tensor[idx_eval])
ce_loss = criterion_direction(pred_tensor, resp_tensor)
all_ce_losses += ce_loss
del resp_tensor, pred_tensor
if len_used_regs > 0:
ce_loss = all_ce_losses / len_used_regs
else:
ce_loss = all_ce_losses
return ce_loss
def regularize_loss(modelparams, net, loss):
    '''
    Add the requested penalty to `loss` over the conv and residual-layer
    parameters of `net`: ltype 1/2 adds an L1/L2 norm penalty, ltype 3
    clips gradient norms in place, and ltype 4 adds both L1 and L2
    penalties. The penalty weight is modelparams["lambda_param"].
    '''
lambda1 = modelparams["lambda_param"]
ltype = modelparams["ltype"]
if ltype == 3:
if torch.cuda.device_count() > 1:
torch.nn.utils.clip_grad_norm_(
net.module.conv.parameters(), lambda1)
torch.nn.utils.clip_grad_norm_(
net.module.layer1.parameters(), lambda1)
torch.nn.utils.clip_grad_norm_(
net.module.layer2.parameters(), lambda1)
torch.nn.utils.clip_grad_norm_(
net.module.layer3.parameters(), lambda1)
if len(modelparams["convparam"]) == 4:
torch.nn.utils.clip_grad_norm_(
net.module.layer4.parameters(), lambda1)
else:
torch.nn.utils.clip_grad_norm_(
net.conv.parameters(), lambda1)
torch.nn.utils.clip_grad_norm_(
net.layer1.parameters(), lambda1)
torch.nn.utils.clip_grad_norm_(
net.layer2.parameters(), lambda1)
torch.nn.utils.clip_grad_norm_(
net.layer3.parameters(), lambda1)
if len(modelparams["convparam"]) == 4:
torch.nn.utils.clip_grad_norm_(
net.layer4.parameters(), lambda1)
if torch.cuda.device_count() > 1:
l0_params = torch.cat(
[x.view(-1) for x in
net.module.conv.parameters()])
l1_params = torch.cat(
[x.view(-1) for x in
net.module.layer1.parameters()])
l2_params = torch.cat(
[x.view(-1) for x in
net.module.layer2.parameters()])
l3_params = torch.cat(
[x.view(-1) for x in
net.module.layer3.parameters()])
if len(modelparams["convparam"]) == 4:
l4_params = torch.cat(
[x.view(-1) for x in
net.module.layer4.parameters()])
else:
l0_params = torch.cat(
[x.view(-1) for x in net.conv.parameters()])
l1_params = torch.cat(
[x.view(-1) for x in net.layer1.parameters()])
l2_params = torch.cat(
[x.view(-1) for x in net.layer2.parameters()])
l3_params = torch.cat(
[x.view(-1) for x in net.layer3.parameters()])
if len(modelparams["convparam"]) == 4:
l4_params = torch.cat(
[x.view(-1) for x in net.layer4.parameters()])
if ltype in [1, 2]:
l1_l0 = lambda1 * torch.norm(l0_params, ltype)
l1_l1 = lambda1 * torch.norm(l1_params, ltype)
l1_l2 = lambda1 * torch.norm(l2_params, ltype)
l1_l3 = lambda1 * torch.norm(l3_params, ltype)
if len(modelparams["convparam"]) == 4:
            # use the same norm order as the other layers
            l1_l4 = lambda1 * torch.norm(l4_params, ltype)
loss = loss + l1_l0 + l1_l1 + l1_l2 + l1_l3 + l1_l4
else:
loss = loss + l1_l0 + l1_l1 + l1_l2 + l1_l3
elif ltype == 4:
l1_l0 = lambda1 * torch.norm(l0_params, 1)
l1_l1 = lambda1 * torch.norm(l1_params, 1)
l1_l2 = lambda1 * torch.norm(l2_params, 1)
l1_l3 = lambda1 * torch.norm(l3_params, 1)
l2_l0 = lambda1 * torch.norm(l0_params, 2)
l2_l1 = lambda1 * torch.norm(l1_params, 2)
l2_l2 = lambda1 * torch.norm(l2_params, 2)
l2_l3 = lambda1 * torch.norm(l3_params, 2)
if len(modelparams["convparam"]) == 4:
l1_l4 = lambda1 * torch.norm(l4_params, 1)
l2_l4 = lambda1 * torch.norm(l4_params, 2)
loss = loss + l1_l0 + l1_l1 + l1_l2 +\
l1_l3 + l1_l4 + l2_l0 + l2_l1 +\
l2_l2 + l2_l3 + l2_l4
else:
loss = loss + l1_l0 + l1_l1 + l1_l2 +\
l1_l3 + l2_l0 + l2_l1 +\
l2_l2 + l2_l3
return loss
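# A minimal single-device sketch of regularize_loss (hypothetical ToyNet that
# only mimics the attribute names the function expects; the real model class
# is defined elsewhere):
#
#     class ToyNet(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.conv = torch.nn.Conv1d(4, 8, 3)
#             self.layer1 = torch.nn.Linear(8, 8)
#             self.layer2 = torch.nn.Linear(8, 8)
#             self.layer3 = torch.nn.Linear(8, 4)
#     toy = ToyNet()
#     toy_params = {"lambda_param": 1e-4, "ltype": 2, "convparam": [1, 1, 1]}
#     base_loss = toy.conv(torch.rand(1, 4, 10)).sum()
#     total_loss = regularize_loss(toy_params, toy, base_loss)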
def motor_log(epoch, j, dict_perf, lr, tempdir,
current_loss, net, modelpath, macrobatch,
regression=False):
if regression:
log_model_regression(
os.path.join(
tempdir,
"modelLog_lr{}_macrobatch{}.tsv".format(
lr, macrobatch)),
epoch, current_loss, j,
dict_perf["Tuning.R2"], dict_perf["Tuning.Loss"],
dict_perf["Training.R2"],
dict_perf["averageDNase.R2"])
else:
log_model(
os.path.join(
tempdir,
"modelLog_lr{}_macrobatch{}.tsv".format(
lr, macrobatch)),
epoch, current_loss, j,
dict_perf["Tuning.auROC"], dict_perf["Tuning.Loss"],
dict_perf["Tuning.AveragePrecision"],
dict_perf["Training.auROC"],
dict_perf["Training.AveragePrecision"],
dict_perf["average.auROC"],
dict_perf["average.AP"])
def log_model_regression(logpath, epoch, train_loss,
j, tune_r2, tune_loss,
train_r2, baseline_r2):
current_time = str(datetime.now())
if epoch == 0:
if not os.path.exists(logpath):
with open(logpath, "w") as loglink:
adlist = [
"Time", "Epoch", "MiniBatch", "Training.Loss",
"Training.R2", "Tuning.Loss",
"Tuning.R2", "averageDnase.R2"]
loglink.write("\t".join(adlist) + "\n")
with open(logpath, "a+") as loglink:
float_vals = [train_loss,
train_r2, tune_loss,
tune_r2, baseline_r2]
float_vals = [str(round(each, 5)) for each in float_vals]
adlist = [current_time, str(epoch), str(j)] + float_vals
print("\t".join(adlist))
loglink.write("\t".join(adlist) + "\n")
def log_model(logpath, epoch, train_loss,
j, tune_auroc, tuning_loss, tuning_ap,
train_auroc, train_ap,
baseline_auroc,
baseline_ap):
current_time = str(datetime.now())
if epoch == 0:
if not os.path.exists(logpath):
with open(logpath, "w") as loglink:
adlist = [
"Time", "Epoch", "MiniBatch", "Training.Loss",
"Training.auROC", "Training.AveragePrecision",
"Tuning.Loss", "Tuning.auROC",
"Tuning.AveragePrecision",
"AverageDnase.auROC",
"AverageDnase.averagePrecision"]
print("\t".join(adlist))
loglink.write("\t".join(adlist) + "\n")
with open(logpath, "a+") as loglink:
float_vals = [train_loss, train_auroc,
train_ap, tuning_loss,
tune_auroc, tuning_ap,
baseline_auroc, baseline_ap]
float_vals = [str(round(each, 5)) for each in float_vals]
adlist = [current_time, str(epoch), str(j)] + float_vals
print("\t".join(adlist))
loglink.write("\t".join(adlist) + "\n")
def printProgressBar(iteration, total, prefix='', suffix='',
decimals=1, length=100, fill='█',
printEnd="\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(
100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix),
end=printEnd)
# Print New Line on Complete
if iteration == total:
print()
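# A minimal sketch of printProgressBar in a hypothetical loop:
#
#     for step in range(1, 101):
#         printProgressBar(step, 100, prefix="Epoch 1", suffix="done", length=40)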
def split_tensordict(tensordict_all, ratio=0.8):
    '''
    Split every array in tensordict_all into a training part (the first
    `ratio` fraction of examples) and a tuning part (the remainder),
    keyed identically to the input dictionary.
    '''
len_items = len(tensordict_all["Response"])
split_idx = int(len_items * ratio)
tensordict = {}
tensordict_tune = {}
for each_key, each_val in tensordict_all.items():
tensordict[each_key] = tensordict_all[each_key][:split_idx]
tensordict_tune[each_key] = tensordict_all[each_key][split_idx:]
return tensordict, tensordict_tune
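# A minimal sketch of split_tensordict (hypothetical arrays keyed like the
# training dictionaries used elsewhere in this module):
#
#     demo = {"DNase": np.zeros((10, 4, 100)),
#             "RNA": np.zeros((10, 4, 100)),
#             "Response": np.zeros(10)}
#     train_dict, tune_dict = split_tensordict(demo, ratio=0.8)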
def get_n_params(model):
pp = 0
for p in list(model.parameters()):
nn = 1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
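# A minimal sketch of get_n_params on a toy module:
#
#     get_n_params(torch.nn.Linear(4, 2))  # 4 * 2 weights + 2 biases = 10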
def make_adname(modelparams):
    '''
    Build a descriptive model identifier string from the architecture and
    training hyper-parameters in modelparams; used to name log files,
    checkpoints and model paths.
    '''
augmentations = modelparams["augmentations"]
adname = "Block_{}_Init_{}x{}_K{}".format(
"_".join([str(each) for each in
modelparams["convparam"]]),
int(modelparams["initconv"]),
modelparams["filter_rate"],
int(modelparams["kernel_size"]))
adname = adname +\
"_D_{}_Pool_{}_lr_{}".format(
"_".join(
[str(each) for each in modelparams["dilations"]]),
modelparams["pool_dim"],
modelparams["lr"])
adname = adname +\
"s_{}__{}_Aug_{}".format(
modelparams["stride"],
modelparams["optimizer"],
"-".join(augmentations))
# adname = adname +\
# "_DP_{}".format(modelparams["dropout"])
# if modelparams["normtype"] != "BatchNorm":
# adname = adname + "_{}".format(modelparams["normtype"])
if modelparams["regularize"]:
if modelparams["ltype"] in [1, 2]:
adname = adname +\
"_l{}_{}".format(
modelparams["ltype"],
modelparams["lambda_param"])
elif modelparams["ltype"] == 3:
adname = adname +\
"_GC_{}".format(modelparams["lambda_param"])
elif modelparams["ltype"] == 4:
adname = adname +\
"_l1Andl2_{}".format(modelparams["lambda_param"])
else:
raise ValueError("--ltype > 4 not supported")
# if "SCALE" in modelparams.keys():
# scale_str = "-".join(
# [str(each) for each in modelparams["SCALE"]])
# scale_str = scale_str + "-{}".format(modelparams["SCALE_OP"])
# adname = adname + "_{}".format(scale_str)
# if "LOSS_SCALERS" in modelparams.keys():
# scale_str = "-".join(
# [str(each) for each in modelparams["LOSS_SCALERS"]])
# adname = adname + "_{}".format(scale_str)
# if "RESP_THRESH" in modelparams.keys():
# ad_str = "Resp.Quantile.{}".format(
# modelparams["RESP_THRESH"])
# adname = adname + "_{}".format(ad_str)
if "arcsinh" in modelparams.keys():
adname = adname + "_{}{}".format(
"arcsinh", modelparams["arcsinh"])
if "input_normalize" in modelparams.keys():
adname = adname + "_{}-{}".format(
"normalizeInput",
modelparams["input_normalize"])
return adname
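# A minimal sketch of make_adname (hypothetical hyper-parameters; only the
# keys read above are required):
#
#     demo_params = {
#         "convparam": [1, 1, 1], "initconv": 16, "filter_rate": 2,
#         "kernel_size": 20, "dilations": [1, 4, 8], "pool_dim": 20,
#         "lr": 0.001, "stride": 1, "optimizer": "Adam",
#         "augmentations": [], "regularize": False}
#     print(make_adname(demo_params))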
def compile_paths(outdir, modelparams):
    '''
    Create the log, checkpoint and temporary-data directories for this
    model configuration and return their paths in a dictionary.
    '''
adname = make_adname(modelparams)
logdir = os.path.join(
outdir, "modelLog", adname)
os.makedirs(logdir, exist_ok=True)
chkdir = os.path.join(
"/checkpoint/mkarimza",
os.environ["SLURM_JOB_ID"])
if not os.path.exists(chkdir):
chkdir = os.path.join(logdir, "checkpoints")
os.makedirs(chkdir, exist_ok=True)
chkpaths = [
os.path.join(chkdir, "{}_{}.pt".format(adname, each))
for each in [0, 1]]
itrpaths = [
os.path.join(logdir, "iterDetails_{}.pickle".format(each))
for each in [0, 1]]
modelpath = os.path.join(
logdir,
"{}_currentmodel.pt".format(adname))
modelpath_bestloss = os.path.join(
logdir,
"{}_bestmodel.pt".format(adname))
tempdir = os.path.join(
outdir, "tempData")
os.makedirs(tempdir, exist_ok=True)
dictpaths = {
"adname": adname,
"chkpaths": chkpaths, "logdir": logdir,
"modelpath": modelpath,
"modelpath_bestloss": modelpath_bestloss,
"tempdir": tempdir, "itrpaths": itrpaths}
return dictpaths
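# A minimal sketch of compile_paths (hypothetical output directory; note that
# the call creates the log/temp directories on disk):
#
#     paths = compile_paths("results", demo_params)  # demo_params as in the
#     print(paths["modelpath"], paths["tempdir"])     # make_adname sketch above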
def prepare_response(response_tensor):
    '''
    Binarize the responses (any value > 0 becomes class 1) and return a
    two-column one-hot FloatTensor of shape (n_examples, 2).
    '''
response_tensor[np.where(response_tensor > 0)] = 1
response_tensor = np.array(response_tensor, dtype=int)
response_tensor = torch.from_numpy(response_tensor)
response_onehot = torch.FloatTensor(
response_tensor.shape[0], 2)
response_onehot.zero_()
response_onehot.scatter_(1, response_tensor, 1)
return response_onehot
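# A minimal sketch of prepare_response (the input must be shaped
# (n_examples, 1)):
#
#     prepare_response(np.array([[0.0], [2.5], [0.0]]))
#     # -> tensor([[1., 0.], [0., 1.], [1., 0.]])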
def find_minibatch_size_fast(tensordict, net, criterion,
                             optimizer, device, max_memory=9e9,
                             regression=False):
    '''
    Estimate the largest minibatch size that fits in GPU memory: run a
    forward/backward pass with two examples under apex AMP, measure the
    peak memory, scale the batch size up to max_memory (times the number
    of devices), and fall back to 80% of that estimate if a trial pass
    with the scaled batch fails.
    '''
from apex import amp
num_devices = torch.cuda.device_count()
MINIBATCH = 2
dnase_tensor = torch.from_numpy(
tensordict["DNase"][:MINIBATCH]).to(device)
rna_tensor = torch.from_numpy(
tensordict["RNA"][:MINIBATCH]).to(device)
if not regression:
response_tensor = prepare_response(
tensordict["Response"][:MINIBATCH])
optimizer.zero_grad()
labels = response_tensor.long()
label_idx = torch.max(labels, 1)[1].to(device)
else:
label_idx = torch.from_numpy(
tensordict["Response"][:MINIBATCH]).to(device)
model_init = net(
dnase_tensor,
rna_tensor)
try:
loss = criterion(
model_init, label_idx)
except Exception:
loss = criterion(
model_init[0], label_idx)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
used_memory = torch.cuda.max_memory_allocated(device)
print("Processing 2 batches needs {} GB of GPU memory".format(
used_memory / 1e9))
newbatch = MINIBATCH * int(max_memory / used_memory) * num_devices
print("Set minibatch size to {}".format(newbatch))
del dnase_tensor, rna_tensor
if not regression:
del labels, label_idx, model_init, response_tensor
else:
del label_idx
torch.cuda.empty_cache()
MINIBATCH = newbatch
try:
dnase_tensor = torch.from_numpy(
tensordict["DNase"][:MINIBATCH]).to(device)
rna_tensor = torch.from_numpy(
tensordict["RNA"][:MINIBATCH]).to(device)
if not regression:
response_tensor = prepare_response(
tensordict["Response"][:MINIBATCH])
labels = response_tensor.long()
label_idx = torch.max(labels, 1)[1].to(device)
else:
label_idx = torch.from_numpy(
tensordict["Response"][:MINIBATCH]).to(device)
optimizer.zero_grad()
model_init = net(
dnase_tensor,
rna_tensor)
loss = criterion(
model_init, label_idx)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
del dnase_tensor, rna_tensor
if not regression:
del labels, label_idx, model_init, response_tensor
else:
del label_idx
except Exception:
newbatch = int(MINIBATCH * 0.8)
torch.cuda.empty_cache()
return newbatch
class Augmentator:
def __init__(self, ars, response=[], nucs=["A", "T", "C", "G"]):
'''
Accepts a list of arrays to perform a
variety of augmentations on them
ars: a list of numpy arrays
response: response (only needed for .mask_signal)
nucs: by default set to A T C G order similar
to DataHandler
'''
self.ars = ars
self.response = response
self.nucs = nucs
self.dict_nucs = {"A": "T", "T": "A",
"C": "G", "G": "C",
"N": "N"}
def reverse_complement(self):
'''
Reverse complements arrays within self.ars
'''
list_ars = []
for ar in self.ars:
assert len(ar.shape) == 3, "Expects tensor"
newar = np.zeros(
ar.shape, dtype=np.float16)
for i in range(4):
nuc = self.nucs[i]
rev_nuc = self.dict_nucs[nuc]
# i' is the index of the complement nucleotide
i_prime = np.where(
np.array(self.nucs) == rev_nuc)[0]
idx_nuc = np.where(ar[:, i, :] > 0)
# for idx_nuc[1] which refers to position, reverse it
newar[idx_nuc[0], i_prime, newar.shape[2] - idx_nuc[1] - 1] = \
ar[idx_nuc[0], i, idx_nuc[1]]
list_ars.append(newar)
return list_ars, self.response
def mask_background(self):
'''
For positions without any signal, set the values to 0
Signal is identified as value > 0.1
'''
BGVAL = 0.1 # background value
list_ars = []
for ar in self.ars:
assert len(ar.shape) == 3, "Expects tensor"
newar = np.zeros(
ar.shape, dtype=np.float16)
for i in range(4):
idx_add = np.where(ar[:, i, :] > BGVAL)
newar[idx_add[0], i, idx_add[1]] = \
ar[idx_add[0], i, idx_add[1]]
list_ars.append(newar)
return list_ars, self.response
def mask_signal(self):
'''
Use this type of augmentation to convert a positive
example to a negative example.
Requires self.response and will set it to 0.
'''
assert len(self.response) > 0, "Expects response"
BGVAL = 0.1 # background value
list_ars = []
for ar in self.ars:
assert len(ar.shape) == 3, "Expects tensor"
newar = np.zeros(
ar.shape, dtype=np.float16)
for i in range(4):
idx_add = np.where(ar[:, i, :] == BGVAL)
newar[idx_add[0], i, idx_add[1]] = \
ar[idx_add[0], i, idx_add[1]]
list_ars.append(newar)
out_resp = self.response.copy()
out_resp[out_resp > 0] = 0
return list_ars, out_resp
class DataHandler:
def __init__(self, dnasematpath, dnasemetapath,
dnaseindexpath, sequencedir,
trainchroms, validchroms,
validorgans, window=50000,
limit=True,
select_organs=["default"],
select_labels=["default"],
tissuespecific=False):
'''
Arguments:
dnasematpath: Path to dat_FDR01_hg38.txt
dnasemetapath: Path to DNaseMetaDataWithAvailableRnas.tsv
            dnaseindexpath: Path to DHS_Index_and_Vocabulary...
sequencedir: Path to hg38/np
trainchroms: List of chromosomes to use for training
validchroms: List of chromosomes to use for validation
validorgans: List of organs to exclude
window: Window size around each specific peak
            limit: Boolean indicating whether to use tissue-specific
                peaks only. If True, trainids are ignored and only the
                tissues listed in self.select_organs and
                self.select_labels are used
            select_organs: list of organs to limit to
            select_labels: list of enhancer labels to limit to
            tissuespecific: set to True if multiple samples from the
                same region are wanted
'''
self.tissuespecific = tissuespecific
self.nucs = np.array(["A", "T", "C", "G"])
self.trainchroms = trainchroms
self.validchroms = validchroms
# self.trainids = trainids # ignore trainids
self.validorgans = validorgans
self.dnasematpath = dnasematpath
self.dnasemetapath = dnasemetapath
self.dnaseindexpath = dnaseindexpath
self.sequencedir = sequencedir
self.window = window
self.limit = limit
self.dict_indices_train = {}
self.select_organs = [
"Brain", "Placenta", "Tongue",
"Muscle", "Blood", "Spinal Cord",
"Heart", "Lung"]
self.select_labels = [
"Neural", "Lymphoid",
"Placental / trophoblast",
"Musculoskeletal",
"Cardiac",
"Pulmonary devel."]
if select_organs[0] != "default":
self.select_organs = select_organs
if select_labels[0] != "default":
self.select_labels = select_labels
self.dictissue = {
"Brain": ["Neural"],
"Placenta": ["Placental / trophoblast"],
"Tonge": ["Musculoskeletal"],
"Muscle": ["Musculoskeletal"],
"Blood": ["Lymphoid"],
"Spinal Cord": ["Neural"],
"Heart": ["Cardiac", "Musculoskeletal"],
"Lung": ["Pulmonary devel."]}
self.process_data()
def get_rna(self, idx_region, idx_sample):
chrom, summit = [
self.idxdf.iloc[idx_region, j] for j in
[0, 6]]
start = summit - self.window
end = summit + self.window
chrom_seq = self.get_seq(chrom)
rnatensor = self.initiate_seq(
chrom_seq, start, end)
if start < 0:
start = 0
tempdf = self.metadf.iloc[idx_sample, :]
rnapaths = tempdf["RnaPaths"].split(",")
list_rnas = []
for rnapath in rnapaths:
rnatensorlocal = rnatensor.copy()
try:
bw = pyBigWig.open(rnapath)
except Exception:
print("Check {}".format(rnapath))
raise ValueError("BigWig issue, check logs")
chrom_max = bw.chroms()[chrom]
if end > chrom_max:
end = chrom_max
signal = bw.values(chrom, start, end, numpy=True)
signal[np.isinf(signal)] = max(
signal[np.logical_not(np.isinf(signal))])
for j in range(len(self.nucs)):
nuc = self.nucs[j].encode()
i = np.where(
np.logical_and(
signal > 0,
chrom_seq[start:end] == nuc))[0]
rnatensorlocal[j, i] = rnatensorlocal[j, i] + signal[i]
list_rnas.append(rnatensorlocal)
if len(list_rnas) == 1:
rnatensor = list_rnas[0]
else:
rnatensor = np.zeros(rnatensor.shape)
for each in list_rnas:
rnatensor = rnatensor + each
rnatensor = rnatensor / len(list_rnas)
return rnatensor
def process_data(self):
# dnasemat = pd.read_csv(self.dnasematpath, sep="\t")
self.metadf = pd.read_csv(self.dnasemetapath, sep="\t")
self.metadf["Index"] = np.arange(self.metadf.shape[0])
self.idxdf = pd.read_csv(
self.dnaseindexpath, sep="\t", compression="gzip")
self.idxdf["Index"] = np.arange(self.idxdf.shape[0])
if self.limit:
self.metadf = self.metadf[
self.metadf["Organ"].isin(self.select_organs)]
self.idxdf = self.idxdf[
self.idxdf["component"].isin(self.select_labels)]
trainiddf = self.metadf[
np.logical_not(pd.isna(self.metadf["RnaPaths"]))]
self.trainorgans = [
each for each in | pd.unique(trainiddf["Organ"]) | pandas.unique |
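# A minimal, hypothetical illustration of pandas.unique, the API completed in the
# row above: it returns the distinct values of a Series/1-D array in order of
# first appearance. The organ names below are made up for the example.
import pandas as pd
organs = pd.Series(["Brain", "Lung", "Brain", "Heart"])
print(pd.unique(organs))  # ['Brain' 'Lung' 'Heart']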
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 17:07:38 2021
@author: perger
"""
# import packages
import pandas as pd
from datetime import timedelta, datetime
import pyam
import FRESH_clustering
from pathlib import Path
import glob
# Model name and version, scenario, region
model_name = 'FRESH:COM v2.0'
scenario_name = 'Default scenario'
region_name = 'Austria'
#filename_community = 'Input_data_community_IAMC.xlsx'
filename_grid = 'Input_data_grid_IAMC.csv'
filename_output = 'output_iamc.xlsx'
clustering = True
# Aggregation in the time domain: preparation
time_zone = '+01:00' # deviation from UTC (+01:00 is CET)
start_date = '2019-01-01 00:00' # YYYY-MM-DD HH:MM
number_days = 365
delta = timedelta(hours=1) # resolution ... hourly
time_steps = []
for t in range(24*number_days):
time_steps.append((datetime.fromisoformat(start_date+time_zone)+t*delta))
index_time = list(range(len(time_steps)))
# Read Input Data (from the IAMC Format)
# input data of prosuemr
#_p = Path('Input_data/Prosumer_data')
prosumer_files = {}
prosumer = []
for file in glob.glob("*.csv"):
i = Path(file).stem
if i.startswith('Prosumer'):
prosumer.append(i)
prosumer_files[i] = Path(file)
#file_community = pd.ExcelFile(filename_community)
#prosumer = file_community.sheet_names
# Electricity demand, PV generation, and other prosumer data
variable_load = 'Final Energy|Residential and Commercial|Electricity'
variable_PV = 'Secondary Energy|Electricity|Solar|PV'
SoC_max = 'Maximum Storage|Electricity|Energy Storage System'
SoC_min = 'Minimum Storage|Electricity|Energy Storage System'
q_bat_max = 'Maximum Charge|Electricity|Energy Storage System'
q_bat_min = 'Maximum Discharge|Electricity|Energy Storage System'
PV_capacity = 'Maximum Active power|Electricity|Solar'
w = 'Price|Carbon'
_a = [SoC_max, SoC_min, q_bat_max, q_bat_min, PV_capacity, w]
load = pd.DataFrame()
PV = pd.DataFrame()
prosumer_data = | pd.DataFrame() | pandas.DataFrame |
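# A small sketch (values are assumed, not taken from the input files) of
# pandas.DataFrame, the API completed above: an empty frame can serve as an
# accumulator, or a frame can be built directly from a dict of columns.
import pandas as pd
prosumer_data_demo = pd.DataFrame()                  # empty accumulator
profile_demo = pd.DataFrame({"load": [1.0, 2.0],
                             "PV": [0.3, 0.0]})
print(prosumer_data_demo.empty, profile_demo.shape)  # True (2, 2)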
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import numpy as np
import pytest
from pandas.testing import assert_frame_equal
from gators.imputers.numerics_imputer import NumericsImputer
from gators.imputers.int_imputer import IntImputer
from gators.imputers.float_imputer import FloatImputer
from gators.imputers.object_imputer import ObjectImputer
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture()
def data():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num():
X_int = pd.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing():
X_int = pd.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.copy(),
'float': X_float.copy(),
'object': X_object.copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
@pytest.fixture()
def data_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
X_int_ks = ks.from_pandas(X_int)
X_float_ks = ks.from_pandas(X_float)
X_object_ks = ks.from_pandas(X_object)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
    X_dict = {
        'int': X_int_ks,
        'float': X_float_ks,
        'object': X_object_ks,
    }
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num_ks():
X_int = ks.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing_ks():
X_int = ks.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = ks.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.to_pandas().copy(),
'float': X_float.to_pandas().copy(),
'object': X_object.to_pandas().copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = ks.from_pandas(pd.concat([X_int, X_float, X_object], axis=1))
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
def test_int_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_float_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_object_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_int_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']).to_pandas(),
X_expected_dict['int'],)
@pytest.mark.koalas
def test_float_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['float'].transform(X_dict['float']).to_pandas(),
X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['object'].transform(X_dict['object']).to_pandas(),
X_expected_dict['object'],
)
def test_int_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_float_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_object_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_int_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_float_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
def test_num_int_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_num_float_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_num_int_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_num_float_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
def test_num_int_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_num_float_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
| assert_frame_equal(X_new, X_expected_dict['float']) | pandas.testing.assert_frame_equal |
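# A brief, self-contained sketch of pandas.testing.assert_frame_equal, the API
# completed above: it raises AssertionError when shape, dtypes, index or values
# differ, and passes silently otherwise. The frames here are illustrative only.
import pandas as pd
from pandas.testing import assert_frame_equal
a = pd.DataFrame({"A": [0.0, 1.0]})
b = pd.DataFrame({"A": [0.0, 1.0]})
assert_frame_equal(a, b)               # passes: identical frames
# assert_frame_equal(a, b.astype(int)) # would raise because of the dtype mismatch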
__all__ = ['class_error', 'groupScatter', 'linear_spline', 'lm', 'mae',
'plotPrediction', 'plot_hist', 'r2', 'statx', 'winsorize',]
import riptable as rt
import numpy as np
from .rt_enum import TypeRegister
from .rt_fastarray import FastArray
from .rt_numpy import zeros
# extra classes
import pandas as pd
from bokeh.plotting import output_notebook, figure, show
from bokeh.models import Label
#TODO: Organize the functions in here better
#TODO: Add documentation
#TODO: Replace pandas dependence with display util
def statx(X):
if not isinstance(X, np.ndarray):
X = np.array(X)
pVals = [0.1, 1, 10, 25, 50, 75, 90, 99, 99.9]
    pValNames = ['min', '0.1%', '1%', '10%', '25%', '50%', '75%', '90%', '99%', '99.9%', 'max', 'Mean', 'StdDev', 'Count', 'NaN_Count']
filt = np.isfinite(X)
X_sub = X[filt]
vals = np.percentile(X_sub,pVals)
vals =np.insert(vals,0,np.min(X_sub))
vals =np.append(vals,np.max(X_sub))
vals = np.append(vals,np.mean(X_sub))
vals = np.append(vals,np.std(X_sub))
validcount = np.sum(filt)
# plain count
vals = np.append(vals, X.size)
#nancount
vals = np.append(vals, np.sum(np.isnan(X)))
out = pd.DataFrame({'Stat' : pValNames ,'Value' : vals})
return out
#NOTE: people might prefer name clip/bound?
def winsorize(Y, lb, ub):
out = np.maximum(np.minimum(Y, ub), lb)
return out
def plot_hist(Y, bins):
df = pd.DataFrame({'Y': Y})
df.hist(bins=bins)
def r2(X, Y):
# why are these flipped back?
xmean = np.mean(X)
ymean = np.mean(Y)
xycov = np.mean(np.multiply((X - xmean), (Y - ymean)))
xvar = np.var(X)
yvar = np.var(Y)
r2_value = (xycov * xycov) / (xvar * yvar)
return r2_value
def mae(X, Y):
return np.nanmean(np.abs(X - Y))
def class_error(X, Y):
X2 = np.round(X)
Y2 = np.round(Y)
class_err = np.sum(np.abs(X2 - Y2)) / X2.shape[0]
return class_err
def lm(X, Y, intercept=True, removeNaN=True, displayStats=True):
#TODO: Better display for stats
X0 = X.copy()
Y0 = Y.copy()
if len(X0.shape) == 1:
X0 = X0.reshape(X0.shape[0], 1)
if len(Y0.shape) == 1:
Y0 = Y0.reshape(Y0.shape[0], 1)
if intercept:
X0 = np.hstack([np.ones((X0.shape[0],1)), X0])
if removeNaN:
goodData = ~np.isnan(np.sum(X0, axis=1)) & ~np.isnan(np.sum(Y0, axis=1))
X0 = X0[goodData, :]
Y0 = Y0[goodData, :]
VXX = np.matmul(np.transpose(X0), X0)
VXY = np.matmul(np.transpose(X0), Y0)
coeff = np.linalg.solve(VXX, VXY)
if displayStats:
YHat = np.matmul(X0, coeff)
err = Y0 - YHat
err = err.reshape(Y0.shape[0], Y0.shape[1])
RMS = np.sqrt(np.mean(err * err))
MAE = np.mean(np.abs(err))
A = np.linalg.solve(VXX, np.transpose(X0))
SE = np.sqrt(np.sum(A * A, axis=1)) * RMS
tStat = coeff / SE.reshape(coeff.shape[0], 1)
R = np.mean(YHat * Y0) / (np.std(YHat) * np.std(Y0))
R2 = R * R
print('R2 = ', R2)
print('RMSE = ', RMS)
print('MAE = ', MAE)
print('tStats: ')
print(tStat)
return coeff
def linear_spline(X0, Y0, knots, display = True):
X = X0.copy()
Y = Y0.copy()
X = X.reshape(X.shape[0], 1)
Y = Y.reshape(Y.shape[0], 1)
knots.sort()
numKnots = len(knots)
goodData = ~np.isnan(X) & ~np.isnan(Y)
X = X[goodData]
Y = Y[goodData]
XAug = np.nan * np.zeros((X.shape[0], 2 + numKnots))
XAug[:, 0] = np.ones_like(X)
XAug[:, 1] = X
for j in range(numKnots):
XAug[:, 2 + j] = np.maximum(X - knots[j], 0.0)
coeff = lm(XAug, Y, intercept=False, removeNaN=True, displayStats=False)
YHat = np.matmul(XAug, coeff)
X_uniq, X_idx = np.unique(X, return_index=True)
YHat_uniq = YHat[X_idx]
# idx = X_uniq <= ub & X_uniq >= lb
output_notebook()
# create a new plot
p = figure(tools="pan,box_zoom,reset,save",
title="example",
x_axis_label='sections',
y_axis_label='particles')
p.circle(X_uniq.flatten(), YHat_uniq.flatten(), legend="y", fill_color="red", size=8)
if display:
show(p)
return knots, coeff
#TODO: Make formatting aware of environment, e.g., Spyder, jupyter, etc. in groupScatter and plotPrediction
#NOTE: Can we use regPlot from seaborn
#won't display in jupyter lab
#better auto-detect bounds
#suppress nan warnings
def plotPrediction(X, Yhat, Y, N, lb=None, ub=None):
if lb is None:
lowerBound = np.nanmin(X)
else:
lowerBound = lb
    if ub is None:
upperBound = np.nanmax(X)
else:
upperBound = ub
goodFilt = np.isfinite(X) & np.isfinite(Y) & (X <= upperBound) & (X >= lowerBound) & \
np.isfinite(Yhat) & np.isfinite(Y)
dF = | pd.DataFrame({'X': X[goodFilt], 'Y': Y[goodFilt], 'Yhat': Yhat[goodFilt]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
from django.core.files import File
from django.core.exceptions import ObjectDoesNotExist
from fixtures_functions import *
from main.functions import max_num_asiento, crea_asiento_simple, extraer_asientos, crear_asientos, valida_simple, valida_compleja
class TestMaxNumAsiento:
def test_get_max_num_asiento(self, populate_database):
_, _, movimientos = populate_database
nums = [ m.num for m in movimientos ]
assert max(nums) == max_num_asiento()
def test_get_max_num_asiento_empty_db(self):
assert max_num_asiento() == 0
class TestCreaAsientoSimple:
def test_crea_asiento_values(self, populate_database):
_, cuentas, _ = populate_database
simple = {
'num': 500,
'fecha': datetime.date(2022, 1, 26),
'descripcion': 'Descripción del movimiento',
'valor': 352.55,
'debe': cuentas[0],
'haber': cuentas[1],
}
crea_asiento_simple(simple)
asiento = Movimiento.objects.filter(num=500)
for movimiento in asiento:
assert movimiento.num == simple['num']
assert movimiento.fecha == simple['fecha']
assert movimiento.descripcion == simple['descripcion']
assert float(movimiento.debe) in [simple['valor'], 0.0]
assert float(movimiento.haber) in [simple['valor'], 0.0]
            assert movimiento.cuenta in (cuentas[0], cuentas[1])
def test_crea_asiento_as_strings(self, populate_database):
_, cuentas, _ = populate_database
# providing the values as strings, as provided by form
simple = {
'num': 500,
'fecha': '2022-01-26',
'descripcion': 'Descripción del movimiento',
'valor': '352.55',
'debe': cuentas[0],
'haber': cuentas[1],
}
crea_asiento_simple(simple)
asiento = Movimiento.objects.filter(num=500)
for movimiento in asiento:
assert movimiento.num == simple['num']
assert movimiento.fecha == datetime.date.fromisoformat(simple['fecha'])
assert movimiento.descripcion == simple['descripcion']
assert float(movimiento.debe) in [float(simple['valor']), 0.0]
assert float(movimiento.haber) in [float(simple['valor']), 0.0]
            assert movimiento.cuenta in (cuentas[0], cuentas[1])
class TestExtraerAsientos:
def test_loading_good_file_plantilla_simple(self):
f = open('main/tests/data/plantilla.xlsx', 'rb')
simple, _ = extraer_asientos(File(f))
assert isinstance(simple, pd.DataFrame)
for col in ['Fecha', 'Descripción', 'Valor', 'Debe', 'Haber']:
assert col in simple.columns
assert len(simple) == 4
for descr in ['Pan', 'Pizzas Telepizza', 'Gasolina coche', 'Hipoteca']:
assert descr in list(simple['Descripción'])
def test_loading_good_file_plantilla_compleja(self):
f = open('main/tests/data/plantilla.xlsx', 'rb')
_, compleja = extraer_asientos(File(f))
assert isinstance(compleja, pd.DataFrame)
for col in ['id', 'Fecha', 'Descripción', 'Debe', 'Haber', 'Cuenta']:
assert col in compleja.columns
assert len(compleja) == 6
for descr in ['Cena en el restaurante', 'Cena en el restaurante propina', 'Factura EDP - gas y electricidad', 'Factura EDP - gas', 'Factura EDP - electricidad']:
assert descr in list(compleja['Descripción'])
# test on an excel with wrong format, and a non-excel file
@pytest.mark.parametrize('filename', ['empty_file.xlsx', 'logo.svg'])
def test_loading_bad_file_plantilla_simple(self, filename):
f = open('main/tests/data/'+filename, 'rb')
simple, _ = extraer_asientos(File(f))
assert isinstance(simple, pd.DataFrame)
for col in ['Fecha', 'Descripción', 'Valor', 'Debe', 'Haber']:
assert col in simple.columns
assert len(simple) == 0
# test on an excel with wrong format, and a non-excel file
@pytest.mark.parametrize('filename', ['empty_file.xlsx', 'logo.svg'])
def test_loading_bad_file_plantilla_compleja(self, filename):
f = open('main/tests/data/'+filename, 'rb')
_, compleja = extraer_asientos(File(f))
assert isinstance(compleja, pd.DataFrame)
for col in ['id', 'Fecha', 'Descripción', 'Debe', 'Haber', 'Cuenta']:
assert col in compleja.columns
assert len(compleja) == 0
class TestCrearAsientos:
@pytest.fixture
def create_asiento_simple_dataframe(self, populate_database_cuentas):
_, cuentas = populate_database_cuentas
asiento_dict = {
'Fecha': ['2022-01-10', '2022-01-11', '2022-01-12', '2022-01-13'],
'Descripción': [ f'Movimiento {n+1}' for n in range(4) ],
'Valor': [10.10, 11.11, 12.12, 13.13],
'Debe': [cuentas[0].num]*4,
'Haber': [cuentas[1].num]*4,
}
simple_df = | pd.DataFrame(asiento_dict) | pandas.DataFrame |
"""Combine demand, hydro, wind, and solar traces into a single DataFrame"""
import os
import time
import pandas as pd
import matplotlib.pyplot as plt
def _pad_column(col, direction):
"""Pad values forwards or backwards to a specified date"""
# Drop missing values
df = col.dropna()
# Convert to DataFrame
df = df.to_frame()
# Options that must change depending on direction in which to pad
if direction == 'forward':
keep = 'last'
new_index = pd.date_range(start=df.index[0], end='2051-01-01 00:00:00', freq='1H')
elif direction == 'backward':
keep = 'first'
new_index = pd.date_range(start='2016-01-01 01:00:00', end=df.index[-1], freq='1H')
else:
raise Exception(f'Unexpected direction: {direction}')
# Update index
df = df.reindex(new_index)
def _get_hour_of_year(row):
"""Get hour of year"""
# Get day of year - adjust by 1 minute so last timestamp (2051-01-01 00:00:00)
# is assigned to 2050. Note this timestamp actually corresponds to the interval
# 2050-12-31 23:00:00 to 2051-01-01 00:00:00
day_timestamp = row.name - pd.Timedelta(minutes=1)
# Day of year
day = day_timestamp.dayofyear
# Hour of year
hour = ((day - 1) * 24) + day_timestamp.hour + 1
return hour
# Hour of year
df['hour_of_year'] = df.apply(_get_hour_of_year, axis=1).to_frame('hour_of_year')
# Last year with complete data
fill_year = df.dropna(subset=[col.name]).drop_duplicates(subset=['hour_of_year'], keep=keep)
# DataFrame that will have values padded forward
padded = df.reset_index().set_index('hour_of_year')
# Pad using values from last year with complete data
padded.update(fill_year.set_index('hour_of_year'), overwrite=False)
# Set timestamp as index
padded = padded.set_index('index')
# Return series
padded = padded[col.name]
return padded
def pad_dataframe(col):
"""Apply padding - forwards and backwards for each column in DataFrame"""
# Pad values forwards
padded = _pad_column(col, direction='forward')
# Pad values backwards
padded = _pad_column(padded, direction='backward')
return padded
def format_wind_traces(data_dir):
"""Format wind traces"""
# Load wind traces
df = pd.read_hdf(os.path.join(data_dir, 'wind_traces.h5'))
# Reset index and pivot
df = df.reset_index().pivot(index='timestamp', columns='bubble', values='capacity_factor')
# Pad data forward
df = df.apply(pad_dataframe)
# Add level to column index
df.columns = pd.MultiIndex.from_product([['WIND'], df.columns])
return df
def format_demand_traces(data_dir, root_data_dir):
"""
Format demand traces
Note: Only considering the 'neutral' demand scenario
"""
# Construct directory containing network data
network_dir = os.path.join(root_data_dir, 'files', 'egrimod-nem-dataset-v1.3', 'akxen-egrimod-nem-dataset-4806603',
'network')
# Load demand traces
df_region_demand = pd.read_hdf(os.path.join(data_dir, 'demand_traces.h5'))
# Only consider neutral demand scenario
df_region_demand = df_region_demand.loc[df_region_demand['scenario'] == 'Neutral', :]
# Reindex and pivot
df_region_demand = df_region_demand.reset_index().pivot(index='timestamp', columns='region', values='demand')
# Add suffix so region IDs are consistent with other MMSDM datasets
df_region_demand = df_region_demand.add_suffix('1')
# Network nodes
df_nodes = pd.read_csv(os.path.join(network_dir, 'network_nodes.csv'), index_col='NODE_ID')
# Proportion of region demand consumed in each zone
df_allocation = df_nodes.groupby(['NEM_REGION', 'NEM_ZONE'])['PROP_REG_D'].sum()
df_zone_demand = pd.DataFrame(index=df_region_demand.index, columns=df_allocation.index)
# Demand for each zone
def _get_zone_demand(row):
"""Disaggregate region demand into zonal demand"""
# Demand in each zone
zone_demand = df_allocation.loc[(row.name[0], row.name[1])] * df_region_demand[row.name[0]]
return zone_demand
# Demand in each zone
df_zone_demand = df_zone_demand.apply(_get_zone_demand)
# Remove region ID from column index
df_zone_demand = df_zone_demand.droplevel(0, axis=1)
# Pad corresponding day-hour values to extend the series to a specified date
df_zone_demand = df_zone_demand.apply(pad_dataframe)
# Add label to columns
df_zone_demand.columns = pd.MultiIndex.from_product([['DEMAND'], df_zone_demand.columns])
return df_zone_demand
def format_solar_traces(data_dir):
"""Format solar data"""
# Solar traces
df = pd.read_hdf(os.path.join(data_dir, 'solar_traces.h5'))
# Pivot so different solar technologies and their respective zones constitute the columns
# and timestamps the index
df = df.reset_index().pivot_table(index='timestamp', columns=['zone', 'technology'], values='capacity_factor')
# Merge column index levels so NEM zone and technology represented in single label
df.columns = df.columns.map('|'.join).str.strip('|')
# Pad dates (ensure data starts from 2016 and ends at end of 2050)
df = df.apply(pad_dataframe)
# Add column index denoting solar data
df.columns = pd.MultiIndex.from_product([['SOLAR'], df.columns])
return df
def format_hydro_traces(data_dir):
"""
Repeat hydro traces for each year in model horizon
Note: Assuming that hydro traces are mainly influenced by seasonal
weather events, and similar cycles are observed year to year. Signals
in 2016 are repeated for corresponding hour-day-months in the following
years. E.g. hydro output on 2016-01-06 05:00:00 is the same on
2030-01-06 05:00:00.
"""
# Hydro traces
df = pd.read_hdf(os.path.join(data_dir, 'hydro_traces.h5'))
# Add hour, day, month to DataFrame
df['hour'] = df.index.hour
df['day'] = df.index.day
df['month'] = df.index.month
# Construct new DataFrame with index from 2016 - 2050
model_horizon_index = | pd.date_range(start='2016-01-01 01:00:00', end='2051-01-01 00:00:00', freq='1H') | pandas.date_range |
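# A quick sketch of pandas.date_range, the API completed above; the dates are
# chosen only for illustration. With start/periods/freq it returns an hourly
# DatetimeIndex, which is what the model horizon index above relies on.
import pandas as pd
idx = pd.date_range(start="2016-01-01 01:00:00", periods=3, freq="1H")
print(idx)  # 2016-01-01 01:00:00, 02:00:00, 03:00:00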
import pandas as pd
import cx_Oracle
import time
import os
from datetime import date
import omdt as odt
import xlwings
import wait_handdle as wth
pt = os.getcwd()
today = date.today()
omdb = os.getcwd() + "\\" + "OMDB.csv"
# lambda <args>: <return value> if <condition> else (<return value> if <condition> else <return value>)
TS = lambda x: '2G' if ('2G SITE DOWN' in x) \
else ('3G' if ('3G SITE DOWN' in x) \
else ('4G' if ('4G SITE DOWN' in x) \
else ('MF' if ('MAIN' in x) \
else ('DC' if ('VOLTAGE' in x) \
else "NA"))))
ExTime = int(time.strftime("%M"))
print(ExTime)
def timex():
t = time.localtime()
curr_tm = time.strftime("%H%M", t)
return curr_tm
def MACRO_RUN(fpth,comnd):
if comnd=='EX':
excelpath = os.getcwd() + '\\xlsF\\A_SEMRW.xlsm'
filepath = fpth
excel_app = xlwings.App(visible=False)
excel_book = excel_app.books.open(excelpath)
x = excel_book.macro('init')
x(filepath)
time.sleep( 30 )
return 'success'
else:
return 'Not Executed'
def qry_tg(tbl,usr, pas, selcol):
conn = cx_Oracle.connect(usr, pas, 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print(conn.version)
tim = time.localtime()
foldr = os.getcwd() + "\\download\\" + today.strftime('%m%d%y') + time.strftime("%H%M", tim) + '_' + tbl + '.csv'
dy_p = odt.day_minus(7)
dy_f = odt.day_plus(1)
Q1 = "FROM " + tbl + " WHERE TYPE=1 AND Severity BETWEEN 1 AND 5 "
Q2 = "AND (LASTOCCURRENCE BETWEEN TO_DATE('" + dy_p + "','DD-MM-RRRR') AND TO_DATE('" + dy_f + "','DD-MM-RRRR'))"
QF = "SELECT" + selcol + Q1 + Q2
print(QF)
print('----------------')
print(timex())
df = pd.read_sql(QF, con=conn)
print(timex())
df2g = df[df['SUMMARY'].str.contains('2G SITE DOWN')]
df3g = df[df['SUMMARY'].str.contains('3G SITE DOWN')]
df4g = df[df['SUMMARY'].str.contains('4G SITE DOWN')]
dfmf = df[df['SUMMARY'].str.contains('MAIN')]
dfdl = df[df['SUMMARY'].str.contains('DC LOW')]
dftmp = df[df['SUMMARY'].str.contains('TEMP')]
dfcell = df[df['SUMMARY'].str.contains('CELL DOWN')]
dfth = df[df['SUMMARY'].str.contains('ERI-RRU THEFT')]
df_cnct = [df2g,df3g,df4g,dfmf,dfdl,dftmp,dfcell,dfth]
df_all = | pd.concat(df_cnct) | pandas.concat |
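# A minimal sketch of pandas.concat, the API completed above, using toy frames:
# it stacks a list of DataFrames row-wise by default, keeping the union of columns.
import pandas as pd
df_a = pd.DataFrame({"SUMMARY": ["2G SITE DOWN"]})
df_b = pd.DataFrame({"SUMMARY": ["DC LOW"]})
combined = pd.concat([df_a, df_b], ignore_index=True)
print(combined.shape)  # (2, 1)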
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = | pd.to_datetime(['now', pd.Timestamp.min]) | pandas.to_datetime |
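# A short illustrative sketch of pandas.to_datetime, the API completed above: it
# parses strings, Timestamps or mixed lists into datetime64 values, returning a
# DatetimeIndex for list-like input. The inputs below are assumed for the example.
import pandas as pd
parsed = pd.to_datetime(["2018-01-01", pd.Timestamp.max])
print(parsed.dtype)  # datetime64[ns]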
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Send the pre-computed data space results (projections, targets) to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
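# e.g. with columns ['a', 'b', 'c'], column_index(df, ['c', 'a']) returns [2, 0]
# per-class feature importance: scale the features, then weight each class-conditional
# mean of the scaled features by the model's global feature importances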
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
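# 'blend' mode evaluates on a single 80/20 shuffle split; any other mode uses 5-fold cross-validation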
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Grid-search every selected algorithm over its hyper-parameter space and train the resulting models
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
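# joblib caches GridSearchForModels results on disk, so repeated calls with identical arguments reuse the stored output instead of refitting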
# Calculate performance and other results for every algorithm and model
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# extract the hyper-parameter settings of every trained model
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
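# the first 8 columns of 'metrics' come from GridSearchCV; the extra metrics computed below are inserted starting at column position 8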
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
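# when the feature-importance toggle is on, compute eli5 permutation importance and a per-feature accuracy (5-fold CV on each single feature)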
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
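# min-max normalize the raw log-loss values of this algorithm's models into [0, 1], e.g. [0.2, 0.5, 0.8] -> [0.0, 0.5, 1.0]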
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
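# univariate chi-squared feature scores; k='all' keeps every feature because only the scores are needed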
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
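# 'factors' is a 0/1 mask over the metric columns; drop every column whose factor is 0, tracking removals so the shifting column positions stay aligned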
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
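# shift each algorithm's local model indices by its global offset so IDs are unique across algorithms, then keep only the brushed (user-selected) models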
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
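# simple soft voting: for every instance, average the per-class probability lists element-wise across the selected models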
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
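# Procrustes-align the MDS projection of the remaining models to the projection of all models so point positions stay comparable after removals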
mtx2PredFinal = []
_, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
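# from_dict yields one column per model id, so transpose to get one row per model and one column per hyper-parameter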
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
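# the model functions are patched to read bundled sample forecasts from tests/data, so these tests run offline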
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
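# any GET to an observation values endpoint is answered with the canned JSON payload above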
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert 'index=True not supported' in str(excinfo.value)
def test_run_persistence_interval_too_long(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('48h'), # too long
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_interval_not_midnight_to_midnight(session,
site_metadata,
obs_5min_begin):
# not midnight to midnight
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=22),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2200Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_incompatible_issue(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2330Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'incompatible' in str(excinfo.value).lower()
def test_run_persistence_fx_too_short(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1min'),
run_length=pd.Timedelta('3min'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'requires observation.interval_length' in str(excinfo.value)
def test_run_persistence_incompatible_instant_fx(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'instantaneous forecast' in str(excinfo.value).lower()
def test_run_persistence_incompatible_instant_interval(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
obs = obs_5min_begin.replace(interval_label='instantaneous',
interval_length=pd.Timedelta('10min'))
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs, forecast, run_time,
issue_time)
assert 'identical interval length' in str(excinfo.value)
def test_verify_nwp_forecasts_compatible(ac_power_forecast_metadata):
fx0 = ac_power_forecast_metadata
fx1 = replace(fx0, run_length=pd.Timedelta('10h'), interval_label='ending')
df = pd.DataFrame({'forecast': [fx0, fx1], 'model': ['a', 'b']})
errs = main._verify_nwp_forecasts_compatible(df)
assert set(errs) == {'model', 'run_length', 'interval_label'}
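# several cases below are deliberately malformed JSON: the helper is expected to detect the flag by pattern matching rather than strict JSON parsing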
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', True),
('{"is_reference_persistence_forecast": true}', False),
('{"is_reference_forecast": "True"}', True),
('{"is_reference_forecast":"True"}', True),
('is_reference_forecast" : "True"}', True),
('{"is_reference_forecast" : true, "otherkey": badjson, 9}', True),
('reference_forecast": true', False),
('{"is_reference_forecast": false}', False),
("is_reference_forecast", False)
])
def test_is_reference_forecast(string, expected):
assert main._is_reference_forecast(string) == expected
def test_find_reference_nwp_forecasts_json_err(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
extra_params = '{"model": "themodel", "is_reference_forecast": true}'
fxs = [replace(ac_power_forecast_metadata, extra_parameters=extra_params),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "yes"}'),
replace(ac_power_forecast_metadata, extra_parameters='{"is_reference_forecast": true'), # NOQA
replace(ac_power_forecast_metadata, extra_parameters='')]
out = main.find_reference_nwp_forecasts(fxs)
assert logger.warning.called
assert len(out) == 1
def test_find_reference_nwp_forecasts_no_model(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
fxs = [replace(ac_power_forecast_metadata, extra_parameters='{}',
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 0
assert logger.debug.called
assert logger.error.called
def test_find_reference_nwp_forecasts_no_init(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 2
assert out.next_issue_time.unique() == [None]
assert out.piggyback_on.unique() == ['0']
def test_find_reference_nwp_forecasts(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(
fxs, pd.Timestamp('20190501T0000Z'))
assert len(out) == 2
assert out.next_issue_time.unique()[0] == pd.Timestamp('20190501T0500Z')
assert out.piggyback_on.unique() == ['0']
@pytest.fixture()
def forecast_list(ac_power_forecast_metadata):
model = 'nam_12km_cloud_cover_to_hourly_mean'
prob_dict = ac_power_forecast_metadata.to_dict()
prob_dict['constant_values'] = (0, 50, 100)
prob_dict['axis'] = 'y'
prob_dict['extra_parameters'] = '{"model": "gefs_half_deg_to_hourly_mean", "is_reference_forecast": true}' # NOQA
return [replace(ac_power_forecast_metadata,
extra_parameters=(
'{"model": "%s", "is_reference_forecast": true}'
% model),
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "gfs_quarter_deg_hourly_to_hourly_mean", "is_reference_forecast": true}', # NOQA
forecast_id='1'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='2',
variable='ghi'),
datamodel.ProbabilisticForecast.from_dict(prob_dict),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='3',
variable='dni',
provider='Organization 2'
),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "badmodel", "is_reference_forecast": true}', # NOQA
forecast_id='4'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "6", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='5',
variable='ghi'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": false}' % model, # NOQA
forecast_id='7',
variable='ghi'),
]
def test_process_nwp_forecast_groups(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 4
@pytest.mark.parametrize('run_time', [None, pd.Timestamp('20190501T0000Z')])
def test_process_nwp_forecast_groups_issue_time(mocker, forecast_list,
run_time):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert post_vals.call_count == 4
run_nwp.assert_called_with(mocker.ANY, mocker.ANY, mocker.ANY,
pd.Timestamp('20190501T0500Z'))
def test_process_nwp_forecast_groups_missing_var(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-3])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert logger.warning.called
assert post_vals.call_count == 4
def test_process_nwp_forecast_groups_bad_model(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[4:-1])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 0
def test_process_nwp_forecast_groups_missing_runfor(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[-2:])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert api.post_forecast_values.call_count == 0
@pytest.mark.parametrize('ind', [0, 1, 2])
def test__post_forecast_values_regular(mocker, forecast_list, ind):
api = mocker.MagicMock()
fx = forecast_list[ind]
main._post_forecast_values(api, fx, [0], 'whatever')
assert api.post_forecast_values.call_count == 1
def test__post_forecast_values_cdf(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
main._post_forecast_values(api, fx, vals, 'gefs')
assert api.post_probabilistic_forecast_constant_value_values.call_count == 3 # NOQA
def test__post_forecast_values_cdf_not_gefs(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(ValueError):
main._post_forecast_values(api, fx, vals, 'gfs')
def test__post_forecast_values_cdf_less_cols(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(10)})
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, vals, 'gefs')
def test__post_forecast_values_cdf_not_df(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, ser, 'gefs')
def test__post_forecast_values_cdf_no_cv_match(mocker, forecast_list):
api = mocker.MagicMock()
fx = replace(forecast_list[3], constant_values=(
replace(forecast_list[3].constant_values[0], constant_value=3.0
),))
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(KeyError):
main._post_forecast_values(api, fx, vals, 'gefs')
@pytest.mark.parametrize('issue_buffer,empty', [
(pd.Timedelta('10h'), False),
(pd.Timedelta('1h'), True),
(pd.Timedelta('5h'), False)
])
def test_make_latest_nwp_forecasts(forecast_list, mocker, issue_buffer, empty):
session = mocker.patch('solarforecastarbiter.io.api.APISession')
session.return_value.get_user_info.return_value = {'organization': ''}
session.return_value.list_forecasts.return_value = forecast_list[:-3]
session.return_value.list_probabilistic_forecasts.return_value = []
run_time = pd.Timestamp('20190501T0000Z')
# last fx has different org
fxdf = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
process = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.process_nwp_forecast_groups') # NOQA
main.make_latest_nwp_forecasts('', run_time, issue_buffer)
if empty:
process.assert_not_called()
else:
assert_frame_equal(process.call_args[0][-1], fxdf)
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', False),
('{"is_reference_persistence_forecast": true}', True),
('{"is_reference_persistence_forecast": "True"}', True),
('{"is_reference_persistence_forecast":"True"}', True),
('is_reference_persistence_forecast" : "True"}', True),
('{"is_reference_persistence_forecast" : true, "otherkey": badjson, 9}',
True),
('reference_persistence_forecast": true', False),
('{"is_reference_persistence_forecast": false}', False),
("is_reference_persistence_forecast", False)
])
def test_is_reference_persistence_forecast(string, expected):
assert main._is_reference_persistence_forecast(string) == expected
@pytest.fixture
def perst_fx_obs(mocker, ac_power_observation_metadata,
ac_power_forecast_metadata):
observations = [
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
)
]
def make_extra(obs):
extra = (
'{"is_reference_persistence_forecast": true,'
f'"observation_id": "{obs.observation_id}"'
'}'
)
return extra
forecasts = [
ac_power_forecast_metadata.replace(
name='FX0',
extra_parameters=make_extra(observations[0]),
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX no persist',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX bad js',
extra_parameters='is_reference_persistence_forecast": true other',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
)
]
return forecasts, observations
def test_generate_reference_persistence_forecast_parameters(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
# one hour ahead forecast, so 14Z was made at 13Z
# enough data to do 14Z and 15Z issue times but not 16Z
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
)
assert param_list[1] == (
forecasts[0], observations[0],
| pd.Timestamp('2020-05-20T15:00Z') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 15:18:57 2018
@author: Denny.Lehman
"""
import pandas as pd
import numpy as np
import datetime
import time
from pandas.tseries.offsets import MonthEnd
def npv(rate, df):
value = 0
for i in range(0, df.size):
value += df.iloc[i] / (1 + rate) ** (i + 1)
return value
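# A minimal usage sketch (illustrative numbers, not taken from the datatape):
# discounting starts at period 1, so a flat cash flow of 100 for 3 periods at a
# 10% rate gives npv(0.10, pd.Series([100.0, 100.0, 100.0]))
# = 100/1.1 + 100/1.1**2 + 100/1.1**3, which is approximately 248.69.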
# use this function if finance wants to use the faster CSV version. This version requires proper data cleaning steps
def get_datatape_csv(filepath):
list_of_column_names_required = ['System Project: Sunnova System ID', 'Committed Capital', 'Quote: Recurring Payment', 'Quote: Contract Type' ,'Quote: Payment Escalator', 'InService Date', 'Asset Portfolio - Partner', 'Location']
rename_list = {'System Project: Sunnova System ID':'ID','Committed Capital':'Committed Capital', 'Quote: Contract Type' :'Contract Type', 'Quote: Recurring Payment':'Recurring Payment','Quote: Payment Escalator':'Escalator', 'Location' : 'State'}
d_types = {'System Project: Sunnova System ID':str, 'Committed Capital':np.float64, 'Quote: Recurring Payment': np.float64, 'Quote: Contract Type':str, 'Quote: Payment Escalator':str, 'Asset Portfolio - Partner':str }
parse_dates = ['InService Date']
names=['ID', 'CC', 'RP', 'CT', 'PE', 'InS', 'AP']
df3 = | pd.read_csv(filepath, sep=',', skiprows=0, header=2) | pandas.read_csv |
import unittest
import pandas
from data_set_info_data_class.data_class.data_set_info import DataSetInfo
from data_set_remover.classes.data_class.data_for_criteria_remove import DataForCriteriaRemove
from data_set_remover.depedency_injector.container import Container
from data_set_remover.exceptions.remover_exceptions import WrongInputFormatError, NonIterableObjectError, \
NonExistingDataSetWithGivenNameError, ColumnArraysShouldNotBeBothEmpty, ColumnArraysShouldNotBeBothFilled, \
WrongCriteriaNameError, MissingColumnToIncludeError, MissingPercentCriteriaValueMustBeBetween1and99, \
UniqueImpressionCriteriaValueMustBeGreaterThan1
class DataSetsRemoverTestBase(unittest.TestCase):
pass
class DataSetsRemoverTestErrorCases(DataSetsRemoverTestBase):
def setUp(self):
self.data_set_remover = Container.data_set_remover()
def test_given_none_when_remove_data_set_by_name_then_throw_wrong_input_format_error(self):
with self.assertRaises(WrongInputFormatError):
self.data_set_remover.remove_manually(None, None)
def test_given_non_array_input_when_remove_data_sets_then_throw_non_iterable_input_error(self):
non_iterable_input = 1
with self.assertRaises(NonIterableObjectError):
self.data_set_remover.remove_manually(non_iterable_input, "")
def test_given_array_with_wrong_element_and_data_set_name_type_when_remove_data_sets_then_throw_wrong_input_format_error(
self):
input_with_wrong_element_type = [1, 2, 3]
with self.assertRaises(WrongInputFormatError):
self.data_set_remover.remove_manually(input_with_wrong_element_type, 1)
def test_given_array_with_data_set_and_wrong_data_set_name_when_remove_data_set_then_throw_not_existing_data_set_name(
self):
data_set_info = DataSetInfo("Test", pandas.DataFrame([]), [], [])
wrong_data_set_name = "TestTest"
with self.assertRaises(NonExistingDataSetWithGivenNameError):
self.data_set_remover.remove_manually([data_set_info], wrong_data_set_name)
class DataSetsRemoveByCriteriaTestErrorCases(DataSetsRemoverTestBase):
def setUp(self):
self.data_set_remover = Container.data_set_remover()
self.data_set_info = DataSetInfo("Test", | pandas.DataFrame([[1]], columns=["Test"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pdb
import sys
sys.path.append('../data')
from pytorch_data_operations import buildLakeDataForRNN_manylakes_finetune2, parseMatricesFromSeqs
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, DataLoader
from torch.nn.init import xavier_normal_
from sklearn.ensemble import GradientBoostingRegressor
import math
import re
#######################################################################
# (Sept 2020 - Jared) - this script uses cross validation on the training lakes
# to estimate the best number of lakes "k" to ensemble
#############################################################################
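# Rough illustration of the selection idea only (hypothetical helper names, not
# this script's actual loop): score each candidate ensemble size k on held-out
# training lakes and keep the k with the lowest average error, e.g.
#   cv_err = {k: np.mean([rmse(ensemble_top_k(fold, k), fold_obs(fold))
#                         for fold in folds]) for k in range(1, max_k + 1)}
#   best_k = min(cv_err, key=cv_err.get)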
use_gpu = True
ids = | pd.read_csv('../../metadata/pball_site_ids.csv', header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 13:26:04 2020
@author: alex1
"""
import math
import numpy as np
import pandas as pd
# # debug
# mp = MpFunctions(data=df, freq=2, style='tpo', avglen=8, ticksize=24, session_hr=24)
# mplist = mp.get_context()
# #mplist[1]
# meandict = mp.get_mean()
# #meandict['volume_mean']
class MpFunctions():
def __init__(self, data, freq=30, style='tpo', avglen=8, ticksize=8, session_hr=8):
self.data = data
self.freq = freq
self.style = style
self.avglen = avglen
self.ticksize = ticksize
self.session_hr = session_hr
def get_ticksize(self):
# data = df
numlen = int(len(self.data) / 2)
# sample size for calculating ticksize = 50% of most recent data
tztail = self.data.tail(numlen).copy()
tztail['tz'] = tztail.Close.rolling(self.freq).std() # std. dev of 30 period rolling
tztail = tztail.dropna()
ticksize = np.ceil(tztail['tz'].mean() * 0.25) # 1/4 th of mean std. dev is our ticksize
if ticksize < 0.2:
ticksize = 0.2 # minimum ticksize limit
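        # Worked example (illustrative numbers): if the mean rolling std of
        # Close is 10.0, ticksize = ceil(0.25 * 10.0) = 3.0 and the method
        # returns int(3.0) == 3.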
return int(ticksize)
def abc(self):
caps = [' A', ' B', ' C', ' D', ' E', ' F', ' G', ' H', ' I', ' J', ' K', ' L', ' M',
' N', ' O', ' P', ' Q', ' R', ' S', ' T', ' U', ' V', ' W', ' X', ' Y', ' Z']
abc_lw = [x.lower() for x in caps]
Aa = caps + abc_lw
alimit = math.ceil(self.session_hr * (60 / self.freq)) + 3
if alimit > 52:
alphabets = Aa * int(
(np.ceil((alimit - 52) / 52)) + 1) # if bar frequency is less than 30 minutes then multiply list
else:
alphabets = Aa[0:alimit]
bk = [28, 31, 35, 40, 33, 34, 41, 44, 35, 52, 41, 40, 46, 27, 38]
ti = []
for s1 in bk:
ti.append(Aa[s1 - 1])
tt = (''.join(ti))
return alphabets, tt
def get_rf(self):
self.data['cup'] = np.where(self.data['Close'] >= self.data['Close'].shift(), 1, -1)
self.data['hup'] = np.where(self.data['High'] >= self.data['High'].shift(), 1, -1)
self.data['lup'] = np.where(self.data['Low'] >= self.data['Low'].shift(), 1, -1)
self.data['rf'] = self.data['cup'] + self.data['hup'] + self.data['lup']
dataf = self.data.drop(['cup', 'lup', 'hup'], axis=1)
return dataf
def get_mean(self):
"""
dfhist: pandas dataframe 1 min frequency
avglen: Length for mean values
freq: timeframe for the candlestick & TPOs
return: a) daily mean for volume, rotational factor (absolute value), IB volume, IB RF b) session length
dfhist = df.copy()
"""
dfhist = self.get_rf()
# dfhist = get_rf(dfhist.copy())
dfhistd = dfhist.resample("D").agg(
{'Open': 'first', 'High': 'max', 'Low': 'min', 'Close': 'last', 'volume': 'sum',
'rf': 'sum', })
dfhistd = dfhistd.dropna()
comp_days = len(dfhistd)
vm30 = dfhistd['volume'].rolling(self.avglen).mean()
volume_mean = vm30[len(vm30) - 1]
rf30 = abs((dfhistd['rf'])).rolling(
            self.avglen).mean()  # absolute mean, so the magnitude is comparable across days
rf_mean = rf30[len(rf30) - 1]
date2 = dfhistd.index[1].date()
mask = dfhist.index.date < date2
dfsession = dfhist.loc[mask]
session_hr = math.ceil(len(dfsession) / 60)
all_val = dict(volume_mean=volume_mean, rf_mean=rf_mean, session_hr=session_hr)
return all_val
def tpo(self, dft_rs):
# dft_rs = dfc1.copy()
# if len(dft_rs) > int(60 / freq):
if len(dft_rs) > int(0):
dft_rs = dft_rs.drop_duplicates('datetime')
dft_rs = dft_rs.reset_index(inplace=False, drop=True)
dft_rs['rol_mx'] = dft_rs['High'].cummax()
dft_rs['rol_mn'] = dft_rs['Low'].cummin()
dft_rs['ext_up'] = dft_rs['rol_mn'] > dft_rs['rol_mx'].shift(2)
dft_rs['ext_dn'] = dft_rs['rol_mx'] < dft_rs['rol_mn'].shift(2)
alphabets = self.abc()[0]
# alphabets = abc(session_hr, freq)[0]
alphabets = alphabets[0:len(dft_rs)]
hh = dft_rs['High'].max()
ll = dft_rs['Low'].min()
day_range = hh - ll
dft_rs['abc'] = alphabets
# place represents total number of steps to take to compare the TPO count
place = int(np.ceil((hh - ll) / self.ticksize))
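            # e.g. (illustrative) a 30-point day range with ticksize 3 gives
            # place = ceil(30 / 3) = 10 price levels to scan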
# kk = 0
abl_bg = []
tpo_countbg = []
pricel = []
volcountbg = []
# datel = []
for u in range(place):
abl = []
tpoc = []
volcount = []
p = ll + (u * self.ticksize)
for lenrs in range(len(dft_rs)):
if p >= dft_rs['Low'][lenrs] and p < dft_rs['High'][lenrs]:
abl.append(dft_rs['abc'][lenrs])
tpoc.append(1)
volcount.append((dft_rs['volume'][lenrs]) / self.freq)
abl_bg.append(''.join(abl))
tpo_countbg.append(sum(tpoc))
volcountbg.append(sum(volcount))
pricel.append(p)
dftpo = pd.DataFrame({'close': pricel, 'alphabets': abl_bg,
'tpocount': tpo_countbg, 'volsum': volcountbg})
# drop empty rows
dftpo['alphabets'].replace('', np.nan, inplace=True)
dftpo = dftpo.dropna()
dftpo = dftpo.reset_index(inplace=False, drop=True)
dftpo = dftpo.sort_index(ascending=False)
dftpo = dftpo.reset_index(inplace=False, drop=True)
if self.style == 'tpo':
column = 'tpocount'
else:
column = 'volsum'
dfmx = dftpo[dftpo[column] == dftpo[column].max()]
mid = ll + ((hh - ll) / 2)
dfmax = dfmx.copy()
dfmax['poc-mid'] = abs(dfmax['close'] - mid)
pocidx = dfmax['poc-mid'].idxmin()
poc = dfmax['close'][pocidx]
poctpo = dftpo[column].max()
tpo_updf = dftpo[dftpo['close'] > poc]
tpo_updf = tpo_updf.sort_index(ascending=False)
tpo_updf = tpo_updf.reset_index(inplace=False, drop=True)
tpo_dndf = dftpo[dftpo['close'] < poc]
tpo_dndf = tpo_dndf.reset_index(inplace=False, drop=True)
valtpo = (dftpo[column].sum()) * 0.70
abovepoc = tpo_updf[column].to_list()
belowpoc = tpo_dndf[column].to_list()
if (len(abovepoc) / 2).is_integer() is False:
abovepoc = abovepoc + [0]
if (len(belowpoc) / 2).is_integer() is False:
belowpoc = belowpoc + [0]
bel2 = np.array(belowpoc).reshape(-1, 2)
bel3 = bel2.sum(axis=1)
bel4 = list(bel3)
abv2 = np.array(abovepoc).reshape(-1, 2)
abv3 = abv2.sum(axis=1)
abv4 = list(abv3)
# cum = poctpo
# up_i = 0
# dn_i = 0
df_va = pd.DataFrame({'abv': pd.Series(abv4), 'bel': | pd.Series(bel4) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
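#   (running them through pytest also works: pytest tests/test_compare.py)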
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
        # index names are correct
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
        # index names are correct
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_link_exact_missing(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='na_')
comp.exact('col', 'col', missing_value=0, label='na_0')
comp.exact('col', 'col', missing_value=9, label='na_9')
comp.exact('col', 'col', missing_value=nan, label='na_na')
comp.exact('col', 'col', missing_value='str', label='na_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_')
pdt.assert_series_equal(result['na_'], expected)
# Missing values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0')
pdt.assert_series_equal(result['na_0'], expected)
# Missing values as 9
expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9')
pdt.assert_series_equal(result['na_9'], expected)
# Missing values as nan
expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na')
pdt.assert_series_equal(result['na_na'], expected)
# Missing values as string
expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str')
pdt.assert_series_equal(result['na_str'], expected)
def test_link_exact_disagree(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='d_')
comp.exact('col', 'col', disagree_value=0, label='d_0')
comp.exact('col', 'col', disagree_value=9, label='d_9')
comp.exact('col', 'col', disagree_value=nan, label='d_na')
comp.exact('col', 'col', disagree_value='str', label='d_str')
result = comp.compute(ix, A, B)
# disagree values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_')
pdt.assert_series_equal(result['d_'], expected)
# disagree values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0')
pdt.assert_series_equal(result['d_0'], expected)
# disagree values as 9
expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9')
pdt.assert_series_equal(result['d_9'], expected)
# disagree values as nan
expected = Series([1, 1, nan, 0, 0], index=ix, name='d_na')
pdt.assert_series_equal(result['d_na'], expected)
# disagree values as string
expected = Series([1, 1, 'str', 0, 0], index=ix, name='d_str')
pdt.assert_series_equal(result['d_str'], expected)
# tests/test_compare.py:TestCompareNumeric
class TestCompareNumeric(TestData):
"""Test the numeric comparison methods."""
def test_numeric(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 2, 3, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', 'step', offset=2)
comp.numeric('col', 'col', method='step', offset=2)
comp.numeric('col', 'col', 'step', 2)
result = comp.compute(ix, A, B)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=1)
pdt.assert_series_equal(result[1], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
def test_numeric_with_missings(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', scale=2)
comp.numeric('col', 'col', scale=2, missing_value=0)
comp.numeric('col', 'col', scale=2, missing_value=123.45)
comp.numeric('col', 'col', scale=2, missing_value=nan)
comp.numeric('col', 'col', scale=2, missing_value='str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Missing values as 0
expected = Series(
[1.0, 1.0, 1.0, 0.0, 0.0], index=ix, dtype=np.float64, name=1)
pdt.assert_series_equal(result[1], expected)
# Missing values as 123.45
expected = Series([1.0, 1.0, 1.0, 123.45, 123.45], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
# Missing values as nan
expected = Series([1.0, 1.0, 1.0, nan, nan], index=ix, name=3)
pdt.assert_series_equal(result[3], expected)
# Missing values as string
expected = Series(
[1, 1, 1, 'str', 'str'], index=ix, dtype=object, name=4)
pdt.assert_series_equal(result[4], expected)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms(self, alg):
A = DataFrame({'col': [1, 1, 1, 1, 1]})
B = DataFrame({'col': [1, 2, 3, 4, 5]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='step', offset=1, label='step')
comp.numeric(
'col', 'col', method='linear', offset=1, scale=2, label='linear')
comp.numeric(
'col', 'col', method='squared', offset=1, scale=2, label='squared')
comp.numeric(
'col', 'col', method='exp', offset=1, scale=2, label='exp')
comp.numeric(
'col', 'col', method='gauss', offset=1, scale=2, label='gauss')
result_df = comp.compute(ix, A, B)
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
if alg != 'step':
print(alg)
print(result)
# sim(scale) = 0.5
expected_bool = Series(
[False, False, False, True, False], index=ix, name=alg)
pdt.assert_series_equal(result == 0.5, expected_bool)
# sim(offset) = 1
expected_bool = Series(
[True, True, False, False, False], index=ix, name=alg)
pdt.assert_series_equal(result == 1.0, expected_bool)
# sim(scale) larger than 0.5
expected_bool = Series(
[False, False, True, False, False], index=ix, name=alg)
pdt.assert_series_equal((result > 0.5) & (result < 1.0),
expected_bool)
# sim(scale) smaller than 0.5
expected_bool = Series(
[False, False, False, False, True], index=ix, name=alg)
pdt.assert_series_equal((result < 0.5) & (result >= 0.0),
expected_bool)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms_errors(self, alg):
# scale negative
if alg != "step":
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
# offset negative
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=-2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
def test_numeric_does_not_exist(self):
# raise when algorithm doesn't exists
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
# tests/test_compare.py:TestCompareDates
class TestCompareDates(TestData):
"""Test the exact comparison method."""
def test_dates(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col')
result = comp.compute(ix, A, B)[0]
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name=0)
pdt.assert_series_equal(result, expected)
def test_date_incorrect_dtype(self):
A = DataFrame({
'col':
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']
})
B = DataFrame({
'col': [
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
A['col1'] = to_datetime(A['col'])
B['col1'] = to_datetime(B['col'])
comp = recordlinkage.Compare()
comp.date('col', 'col1')
pytest.raises(ValueError, comp.compute, ix, A, B)
comp = recordlinkage.Compare()
comp.date('col1', 'col')
pytest.raises(ValueError, comp.compute, ix, A, B)
def test_dates_with_missings(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='m_')
comp.date('col', 'col', missing_value=0, label='m_0')
comp.date('col', 'col', missing_value=123.45, label='m_float')
comp.date('col', 'col', missing_value=nan, label='m_na')
comp.date('col', 'col', missing_value='str', label='m_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_')
pdt.assert_series_equal(result['m_'], expected)
# Missing values as 0
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_0')
pdt.assert_series_equal(result['m_0'], expected)
# Missing values as 123.45
expected = Series([1, 123.45, 0, 0.5, 0.5], index=ix, name='m_float')
pdt.assert_series_equal(result['m_float'], expected)
# Missing values as nan
expected = Series([1, nan, 0, 0.5, 0.5], index=ix, name='m_na')
pdt.assert_series_equal(result['m_na'], expected)
# Missing values as string
expected = Series(
[1, 'str', 0, 0.5, 0.5], index=ix, dtype=object, name='m_str')
pdt.assert_series_equal(result['m_str'], expected)
def test_dates_with_swap(self):
months_to_swap = [(9, 10, 123.45), (10, 9, 123.45), (1, 2, 123.45),
(2, 1, 123.45)]
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='s_')
comp.date(
'col', 'col', swap_month_day=0, swap_months='default', label='s_1')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months='default',
label='s_2')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months=months_to_swap,
label='s_3')
comp.date(
'col',
'col',
swap_month_day=nan,
swap_months='default',
missing_value=nan,
label='s_4')
comp.date('col', 'col', swap_month_day='str', label='s_5')
result = comp.compute(ix, A, B)
# swap_month_day as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='s_')
pdt.assert_series_equal(result['s_'], expected)
# swap_month_day and swap_months as 0
expected = Series([1, 0, 0, 0, 0.5], index=ix, name='s_1')
pdt.assert_series_equal(result['s_1'], expected)
# swap_month_day 123.45 (float)
expected = Series([1, 0, 0, 123.45, 0.5], index=ix, name='s_2')
pdt.assert_series_equal(result['s_2'], expected)
# swap_month_day and swap_months 123.45 (float)
expected = Series([1, 0, 0, 123.45, 123.45], index=ix, name='s_3')
pdt.assert_series_equal(result['s_3'], expected)
# swap_month_day and swap_months as nan
expected = Series([1, nan, 0, nan, 0.5], index=ix, name='s_4')
pdt.assert_series_equal(result['s_4'], expected)
# swap_month_day as string
expected = Series(
[1, 0, 0, 'str', 0.5], index=ix, dtype=object, name='s_5')
pdt.assert_series_equal(result['s_5'], expected)
# tests/test_compare.py:TestCompareGeo
class TestCompareGeo(TestData):
"""Test the geo comparison method."""
def test_geo(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step',
offset=50) # 50 km range
result = comp.compute(ix, A, B)
        # pairwise distances are [36.639460, 54.765854, 44.092472] km;
        # only the 1st and 3rd pairs fall within the 50 km step offset
expected = Series([1.0, 0.0, 1.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
def test_geo_batch(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step', offset=1, label='step')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='linear',
offset=1,
scale=2,
label='linear')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='squared',
offset=1,
scale=2,
label='squared')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='exp',
offset=1,
scale=2,
label='exp')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='gauss',
offset=1,
scale=2,
label='gauss')
result_df = comp.compute(ix, A, B)
print(result_df)
for alg in ['step', 'linear', 'squared', 'exp', 'gauss']:
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
def test_geo_does_not_exist(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo('lat', 'lng', 'lat', 'lng', method='unknown')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareStrings(TestData):
"""Test the exact comparison method."""
def test_defaults(self):
        # the default algorithm is levenshtein;
        # test that the default output is identical to an explicit levenshtein call
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', label='default')
comp.string('col', 'col', method='levenshtein', label='with_args')
result = comp.compute(ix, A, B)
pdt.assert_series_equal(
result['default'].rename(None),
result['with_args'].rename(None)
)
def test_fuzzy(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='jaro', missing_value=0)
comp.string('col', 'col', method='q_gram', missing_value=0)
comp.string('col', 'col', method='cosine', missing_value=0)
comp.string('col', 'col', method='jaro_winkler', missing_value=0)
comp.string('col', 'col', method='dameraulevenshtein', missing_value=0)
comp.string('col', 'col', method='levenshtein', missing_value=0)
result = comp.compute(ix, A, B)
print(result)
assert result.notnull().all(1).all(0)
assert (result[result.notnull()] >= 0).all(1).all(0)
assert (result[result.notnull()] <= 1).all(1).all(0)
def test_threshold(self):
A = DataFrame({'col': [u"gretzky", u"gretzky99", u"gretzky", u"gretzky"]})
B = DataFrame({'col': [u"gretzky", u"gretzky", nan, u"wayne"]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.5,
missing_value=2.0,
label="x_col1"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=1.0,
missing_value=0.5,
label="x_col2"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.0,
missing_value=nan,
label="x_col3"
)
result = comp.compute(ix, A, B)
expected = Series([1.0, 1.0, 2.0, 0.0], index=ix, name="x_col1")
pdt.assert_series_equal(result["x_col1"], expected)
expected = Series([1.0, 0.0, 0.5, 0.0], index=ix, name="x_col2")
pdt.assert_series_equal(result["x_col2"], expected)
expected = Series([1.0, 1.0, nan, 1.0], index=ix, name="x_col3")
pdt.assert_series_equal(result["x_col3"], expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_incorrect_input(self, alg):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
with pytest.raises(Exception):
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
comp.compute(ix, A, B)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms_nan(self, alg):
A = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
B = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 0.0, 0.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=nan)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, nan, nan, nan, nan], index=ix, name=0)
| pdt.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree():
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
m = Node('m', children=[p])
p = m['p']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 1
assert 'p' in m.children
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
    assert c1.price == 100
    assert c2.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
    assert c1.price == 105
    assert c2.price == 95
i = 2
s.update(dts[i], data.ix[dts[i]])
    assert c1.price == 100
    assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.ix[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.ix[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.ix[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.ix[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = | pd.date_range('2010-01-01', periods=3) | pandas.date_range |
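# --- Illustrative sketch (not part of the original test suite) ---------------
# The tests above all follow the same pattern: build a StrategyBase tree, feed it
# a price DataFrame via setup(), then update/adjust/allocate and check positions.
# The snippet below condenses that pattern. `bt.core` as the import path is an
# assumption; modern pandas indexing (.loc) is used instead of the deprecated .ix.
import pandas as pd
from bt.core import SecurityBase, StrategyBase  # assumed import path
def example_allocate_flow():
    c1, c2 = SecurityBase('c1'), SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    c1, c2 = s['c1'], s['c2']  # children are copied into the tree
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.0)
    s.setup(data)
    s.update(dts[0], data.loc[dts[0]])
    s.adjust(1000)  # inject capital
    s.allocate(500, 'c1')  # buys 5 units of c1 at price 100
    assert c1.position == 5
    assert s.value == 1000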
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
into zeros also on the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
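# --- Minimal standalone sketch of the decomposition strategy in _Cov.get_L ----
# get_L above tries a Cholesky factorization and, when the matrix is not positive
# definite, falls back to an eigenvalue decomposition with negative eigenvalues
# clipped to zero. The helper below restates that idea with plain numpy/scipy;
# it is an illustration only, not part of the sandy API.
def _example_get_L(cov):
    try:
        return scipy.linalg.cholesky(cov, lower=True)
    except np.linalg.LinAlgError:
        E, V = np.linalg.eigh(cov)
        E = np.clip(E, 0, None)  # drop negative eigenvalues
        M = V.dot(np.diag(np.sqrt(E)))
        _, R = scipy.linalg.qr(M.T)
        return R.T  # L such that L.dot(L.T) ~= cov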
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
self.data = pd.DataFrame(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index` or `pandas.MultiIndex`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Notes
-----
.. note:: In the future, additional tests will be implemented to check
that the covariance matrix is symmetric and has positive variances.
Examples
--------
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array[1])
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [2, -4]]))
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [3, 4]]))
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data, dtype=float)
if not (len(data.shape) == 2 and data.shape[0] == data.shape[1]):
raise TypeError("Covariance matrix must have two dimensions")
if not (np.diag(data) >= 0).all():
raise TypeError("Covariance matrix must have positive variance")
sym_limit = 10
# Round to avoid numerical fluctuations
if not (data.values.round(sym_limit) == data.values.T.round(sym_limit)).all():
raise TypeError("Covariance matrix must be symmetric")
@property
def size(self):
return self.data.values.shape[0]
def get_std(self):
"""
Extract standard deviations.
Returns
-------
`pandas.Series`
1d array of standard deviations
Examples
--------
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).get_std()
0 1.00000e+00
1 1.00000e+00
Name: STD, dtype: float64
"""
cov = self.to_sparse().diagonal()
std = np.sqrt(cov)
return pd.Series(std, index=self.data.index, name="STD")
def get_eig(self, tolerance=None):
"""
Extract eigenvalues and eigenvectors.
Parameters
----------
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
The replacement condition is implemented as:
.. math::
$$
\frac{e_i}{e_{MAX}} < tolerance
$$
Then, a `tolerance=1e-3` will replace all eigenvalues
1000 times smaller than the largest eigenvalue.
A `tolerance=0` will replace all negative eigenvalues.
Returns
-------
`Pandas.Series`
array of eigenvalues
`pandas.DataFrame`
matrix of eigenvectors
Notes
-----
.. note:: only the real part of the eigenvalues is preserved
.. note:: the discussion associated with the implementation
of this algorithm is available [here](https://github.com/luca-fiorito-11/sandy/discussions/135)
Examples
--------
Extract eigenvalues of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[0]
0 1.40000e+00
1 6.00000e-01
Name: EIG, dtype: float64
Extract eigenvectors of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[1]
0 1
0 7.07107e-01 -7.07107e-01
1 7.07107e-01 7.07107e-01
Extract eigenvalues of covariance matrix.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig()[0]
0 8.90228e-02
1 1.01098e+00
Name: EIG, dtype: float64
Set up a tolerance.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig(tolerance=0.1)[0]
0 0.00000e+00
1 1.01098e+00
Name: EIG, dtype: float64
Test with negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig()[0]
0 3.00000e+00
1 -1.00000e+00
Name: EIG, dtype: float64
Replace negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig(tolerance=0)[0]
0 3.00000e+00
1 0.00000e+00
Name: EIG, dtype: float64
Check output size.
>>> cov = sandy.CategoryCov.random_cov(50, seed=11)
>>> assert cov.get_eig()[0].size == cov.data.shape[0] == 50
>>> sandy.CategoryCov([[1, 0.2, 0.1], [0.2, 2, 0], [0.1, 0, 3]]).get_eig()[0]
0 9.56764e-01
1 2.03815e+00
2 3.00509e+00
Name: EIG, dtype: float64
Real test on H1 file
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> ek = sandy.energy_grids.CASMO12
>>> err = endf6.get_errorr(ek_errorr=ek, err=1)
>>> cov = err.get_cov()
>>> cov.get_eig()[0].sort_values(ascending=False).head(7)
0 3.66411e-01
1 7.05311e-03
2 1.55346e-03
3 1.60175e-04
4 1.81374e-05
5 1.81078e-06
6 1.26691e-07
Name: EIG, dtype: float64
>>> assert not (cov.get_eig()[0] >= 0).all()
>>> assert (cov.get_eig(tolerance=0)[0] >= 0).all()
"""
E, V = scipy.linalg.eig(self.data)
E = pd.Series(E.real, name="EIG")
V = pd.DataFrame(V.real)
if tolerance is not None:
E[E/E.max() < tolerance] = 0
return E, V
def get_corr(self):
"""
Extract correlation matrix.
Returns
-------
df : :obj:`CategoryCov`
correlation matrix
Examples
--------
>>> sandy.CategoryCov([[4, 2.4],[2.4, 9]]).get_corr()
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
"""
cov = self.data.values
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, self.get_std().values)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(cov, coeff).T, coeff)
df = pd.DataFrame(
corr,
index=self.data.index,
columns=self.data.columns,
)
return self.__class__(df)
def invert(self, rows=None):
"""
Method for calculating the inverse matrix.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`CategoryCov`
The inverse matrix.
Examples
--------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3])))
>>> S.invert()
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert()
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert(rows=1)
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
"""
index = self.data.index
columns = self.data.columns
M_nonzero_idxs, M_reduce = reduce_size(self.data)
cov = sps.csc_matrix(M_reduce.values)
rows_ = cov.shape[0] if rows is None else rows
data = sparse_tables_inv(cov, rows=rows_)
M_inv = restore_size(M_nonzero_idxs, data, len(self.data))
M_inv = M_inv.reindex(index=index, columns=columns).fillna(0)
return self.__class__(M_inv)
def log2norm_cov(self, mu):
"""
Transform covariance matrix to the one of the underlying normal
distribution.
Parameters
----------
mu : iterable
The desired mean values of the target lognormal distribution.
Returns
-------
`CategoryCov` of the underlying normal covariance matrix
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_cov(pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index))
A B C
A 2.19722e+00 1.09861e+00 1.38629e+00
B 1.09861e+00 2.39790e+00 1.60944e+00
C 1.38629e+00 1.60944e+00 2.07944e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series([1, 2, .5], index=["A", "B", "C"])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = [1, 2, .5]
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.array([1, 2, .5])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
Notes
-----
.. note:: Reference for the equation is 10.1016/j.nima.2012.06.036
.. math::
$$
cov(lnx_i, lnx_j) = \ln\left(\frac{cov(x_i,x_j)}{<x_i>\cdot<x_j>}+1\right)
$$
"""
mu_ = np.diag(1 / pd.Series(mu))
mu_ = pd.DataFrame(mu_, index=self.data.index, columns=self.data.index)
return self.__class__(np.log(self.sandwich(mu_).data + 1))
def log2norm_mean(self, mu):
"""
Transform mean values to the mean values of the undelying normal
distribution.
Parameters
----------
mu : iterable
The target mean values.
Returns
-------
`pd.Series` of the underlyig normal distribution mean values
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index)
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_mean([1, 1, 1])
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.ones(cov.data.shape[0])
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
Reindexing example
"""
mu_ = pd.Series(mu)
mu_.index = self.data.index
return np.log(mu_**2 / np.sqrt(np.diag(self.data) + mu_**2))
def sampling(self, nsmp, seed=None, rows=None, pdf='normal',
tolerance=None, relative=True):
"""
Extract perturbation coefficients according to chosen distribution with
covariance from given covariance matrix. See note for non-normal
distribution sampling.
The samples' mean will be 1 or 0 depending on `relative` kwarg.
Parameters
----------
nsmp : `int`
number of samples.
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
default pseudo-random number generator).
rows : `int`, optional, default is `None`
option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
pdf : `str`, optional, default is 'normal'
random numbers distribution.
Available distributions are:
* `'normal'`
* `'uniform'`
* `'lognormal'`
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
relative : `bool`, optional, default is `True`
flag to switch between relative and absolute covariance matrix
handling
* `True`: samples' mean will be 1
* `False`: samples' mean will be 0
Returns
-------
`sandy.Samples`
object containing samples
Notes
-----
.. note:: sampling with uniform distribution is performed on
diagonal covariance matrix, neglecting all correlations.
.. note:: sampling with lognormal distribution gives a set of samples
with mean=1 as lognormal distribution can not have mean=0.
Therefore, `relative` parameter does not apply to it.
Examples
--------
Draw 3 sets of samples using custom seed:
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11, rows=1)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sample = sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(1000000, seed=11)
>>> sample.data.cov()
0 1
0 9.98662e-01 3.99417e-01
1 3.99417e-01 9.98156e-01
Small negative eigenvalue:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, tolerance=0)
0 1
0 2.74945e+00 5.21505e+00
1 7.13927e-01 1.07147e+00
2 5.15435e-01 1.64683e+00
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, tolerance=0).data.cov()
0 1
0 9.98662e-01 -1.99822e-01
1 -1.99822e-01 2.99437e+00
Sampling with different `pdf`:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, pdf='uniform', tolerance=0)
0 1
0 -1.07578e-01 2.34960e+00
1 -6.64587e-01 5.21222e-01
2 8.72585e-01 9.12563e-01
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(3, seed=11, pdf='lognormal', tolerance=0)
0 1
0 3.03419e+00 1.57919e+01
1 5.57248e-01 4.74160e-01
2 4.72366e-01 6.50840e-01
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0).data.cov()
0 1
0 1.00042e+00 -1.58806e-03
1 -1.58806e-03 3.00327e+00
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0).data.cov()
0 1
0 1.00219e+00 1.99199e-01
1 1.99199e-01 3.02605e+00
`relative` kwarg usage:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=True).data.mean(axis=0)
0 1.00014e+00
1 9.99350e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=False).data.mean(axis=0)
0 1.41735e-04
1 -6.49679e-04
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=True).data.mean(axis=0)
0 9.98106e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=False).data.mean(axis=0)
0 -1.89367e-03
1 -7.15929e-04
dtype: float64
Lognormal distribution sampling independence from the `relative` kwarg:
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=True).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=False).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
"""
dim = self.data.shape[0]
pdf_ = pdf if pdf != 'lognormal' else 'normal'
y = sample_distribution(dim, nsmp, seed=seed, pdf=pdf_) - 1
y = sps.csc_matrix(y)
# the covariance matrix to decompose is created depending on the chosen
# pdf
if pdf == 'uniform':
to_decompose = self.__class__(np.diag(np.diag(self.data)))
elif pdf == 'lognormal':
ones = np.ones(self.data.shape[0])
to_decompose = self.log2norm_cov(ones)
else:
to_decompose = self
L = sps.csr_matrix(to_decompose.get_L(rows=rows,
tolerance=tolerance))
samples = pd.DataFrame(L.dot(y).toarray(), index=self.data.index,
columns=list(range(nsmp)))
if pdf == 'lognormal':
# mean value of lognormally sampled distributions will be one by
# default
samples = np.exp(samples.add(self.log2norm_mean(ones), axis=0))
elif relative:
samples += 1
return sandy.Samples(samples.T)
@classmethod
def from_var(cls, var):
"""
Construct the covariance matrix from the variance vector.
Parameters
----------
var : 1D iterable
Variance vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_var(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 2.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_var((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 2.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_var([1, 2, 3])) is sandy.CategoryCov
"""
var_ = pd.Series(var)
cov_values = sps.diags(var_.values).toarray()
cov = pd.DataFrame(cov_values,
index=var_.index, columns=var_.index)
return cls(cov)
@classmethod
def from_stdev(cls, std):
"""
Construct the covariance matrix from the standard deviation vector.
Parameters
----------
std : `pandas.Series`
Standard deviations vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_stdev(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 4.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_stdev((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 4.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_stdev([1, 2, 3])) is sandy.CategoryCov
"""
std_ = pd.Series(std)
var = std_ * std_
return cls.from_var(var)
@classmethod
def from_stack(cls, data_stack, index, columns, values, rows=10000000,
kind='upper'):
"""
Create a covariance matrix from a stacked dataframe.
Parameters
----------
data_stack : `pd.Dataframe`
Stacked dataframe.
index : 1D iterable, optional
Index of the final covariance matrix.
columns : 1D iterable, optional
Columns of the final covariance matrix.
values : `str`, optional
Name of the column where the values are located.
rows : `int`, optional
Number of rows to take into account in each loop. The default
is 10000000.
kind : `str`, optional
Select if the stack data represents upper or lower triangular
matrix. The default is 'upper'.
Returns
-------
`sandy.CategoryCov`
Covariance matrix.
Examples
--------
If the stack data represents the covariance matrix:
>>> S = pd.DataFrame(np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]))
>>> S = S.stack().reset_index().rename(columns = {'level_0': 'dim1', 'level_1': 'dim2', 0: 'cov'})
>>> S = S[S['cov'] != 0]
>>> sandy.CategoryCov.from_stack(S, index=['dim1'], columns=['dim2'], values='cov', kind='all')
dim2 0 1 2
dim1
0 1.00000e+00 1.00000e+00 1.00000e+00
1 1.00000e+00 2.00000e+00 1.00000e+00
2 1.00000e+00 1.00000e+00 1.00000e+00
If the stack data represents only the upper triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL').data
>>> test_1
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL', rows=1).data
>>> test_2
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
If the stack data represents only the lower triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower").data
>>> test_1
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower", rows=1).data
>>> test_2
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
"""
cov = segmented_pivot_table(data_stack, rows=rows, index=index,
columns=columns, values=values)
if kind == 'all':
return cls(cov)
else:
return triu_matrix(cov, kind=kind)
def _gls_Vy_calc(self, S, rows=None):
"""
2D calculated output using
.. math::
$$
S \cdot V_{x_{prior}} \cdot S^T
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `Vy_calc` calculated using
S.dot(Vx_prior).dot(S.T)
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov._gls_Vy_calc(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_Vy_calc(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
index = | pd.DataFrame(S) | pandas.DataFrame |
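# --- Hedged illustration of the "sandwich rule" used by _gls_Vy_calc above ----
# The docstring documents Vy_calc = S @ Vx_prior @ S.T; the function below redoes
# the docstring's small example with plain numpy to make the formula concrete.
# It is not the sandy implementation itself.
def _example_sandwich_rule():
    import numpy as np
    S_ = np.array([[1, 2], [3, 4]])
    Vx_prior = np.diag([1.0, 1.0])
    Vy_calc = S_ @ Vx_prior @ S_.T
    # -> [[ 5., 11.], [11., 25.]], matching the doctest output above
    return Vy_calc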
import os
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator
import seaborn as sns
import motmetrics as mm
from algorithms.aaa_util import convert_df
from file_manager import ReadResult
sns.set()
sns.set_style("whitegrid")
LINEWIDTH = 4
ANNOT_SIZE = 12
ALL_HEIGHT = 3
def calc_rank(dataset, trackers_name, scores, reverse=False):
ranks = []
for seq_name in dataset.sequence_names["train"]:
value = np.array(
[scores[tracker_name][seq_name] for tracker_name in trackers_name]
)
temp = value.argsort()
if reverse:
temp = temp[::-1]
rank = np.empty_like(temp)
rank[temp] = np.arange(len(value))
rank = len(trackers_name) - rank
ranks.append(rank)
ranks = np.array(ranks)
return ranks
def draw_detngt(dataset, result_dir):
for seq in dataset:
dataset_name = seq.seq_info["dataset_name"]
seq_name = seq.seq_info["seq_name"]
if seq_name != "MOT17-09-SDP":
continue
save_dir = result_dir / dataset_name / "DetNGT" / seq_name
os.makedirs(save_dir, exist_ok=True)
for frame_idx, (img_path, dets, gts) in enumerate(seq):
filename = os.path.basename(img_path)
im = Image.open(img_path).convert("RGB")
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.add_subplot(111, frameon=False)
sample_ax = axes
# draw frame
sample_ax.imshow(np.asarray(im), aspect="auto")
# draw detection results
for i in range(len(dets)):
box = dets[i, 2:6]
rect = patches.Rectangle(
(box[0], box[1]),
box[2],
box[3],
linewidth=LINEWIDTH,
edgecolor="purple",
facecolor="none",
alpha=1,
)
sample_ax.add_patch(rect)
sample_ax.annotate(
"Det",
xy=(box[0] + box[2] / 2, box[1]),
xycoords="data",
weight="bold",
xytext=(-5, 5),
textcoords="offset points",
size=ANNOT_SIZE,
color="purple",
)
# draw ground truth
for i in range(len(gts)):
if (dataset_name == "MOT16" or dataset_name == "MOT17") and gts[
i, 7
] != 1:
continue
box = gts[i, 1:6]
rect = patches.Rectangle(
(box[1], box[2]),
box[3],
box[4],
linewidth=LINEWIDTH,
edgecolor="darkorange",
facecolor="none",
alpha=1,
)
sample_ax.add_patch(rect)
sample_ax.annotate(
f"{int(box[0])}",
xy=(box[1] + box[3] / 2, box[2] + box[4]),
xycoords="data",
weight="bold",
xytext=(-5, -10),
textcoords="offset points",
size=ANNOT_SIZE,
color="darkorange",
)
sample_ax.axis("off")
# hide tick and tick label of the big axes
plt.axis("off")
plt.grid(False)
plt.savefig(save_dir / filename, bbox_inches="tight")
plt.close()
def draw_det(dataset, result_dir):
for seq in dataset:
dataset_name = seq.seq_info["dataset_name"]
seq_name = seq.seq_info["seq_name"]
if seq_name != "MOT17-09-SDP":
continue
save_dir = result_dir / dataset_name / "Det" / seq_name
os.makedirs(save_dir, exist_ok=True)
for frame_idx, (img_path, dets, gts) in enumerate(seq):
filename = os.path.basename(img_path)
im = Image.open(img_path).convert("RGB")
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.add_subplot(111, frameon=False)
sample_ax = axes
# draw frame
sample_ax.imshow(np.asarray(im), aspect="auto")
# draw detection results
for i in range(len(dets)):
box = dets[i, 2:6]
rect = patches.Rectangle(
(box[0], box[1]),
box[2],
box[3],
linewidth=LINEWIDTH,
edgecolor="black",
facecolor="none",
alpha=1,
)
sample_ax.add_patch(rect)
sample_ax.annotate(
"Det",
xy=(box[0] + box[2] / 2, box[1]),
xycoords="data",
weight="bold",
xytext=(-5, -5),
textcoords="offset points",
size=ANNOT_SIZE,
color="black",
)
sample_ax.axis("off")
# hide tick and tick label of the big axes
plt.axis("off")
plt.grid(False)
plt.savefig(save_dir / filename, bbox_inches="tight")
plt.close()
def draw_gt(dataset, result_dir):
for seq in dataset:
dataset_name = seq.seq_info["dataset_name"]
seq_name = seq.seq_info["seq_name"]
save_dir = result_dir / dataset_name / "GT" / seq_name
os.makedirs(save_dir, exist_ok=True)
for frame_idx, (img_path, dets, gts) in enumerate(seq):
filename = os.path.basename(img_path)
im = Image.open(img_path).convert("RGB")
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.add_subplot(111, frameon=False)
sample_ax = axes
# draw frame
sample_ax.imshow(np.asarray(im), aspect="auto")
# draw ground truth
for i in range(len(gts)):
if (dataset_name == "MOT16" or dataset_name == "MOT17") and gts[
i, 7
] != 1:
continue
box = gts[i, 1:6]
rect = patches.Rectangle(
(box[1], box[2]),
box[3],
box[4],
linewidth=LINEWIDTH,
edgecolor="black",
facecolor="none",
alpha=1,
)
sample_ax.add_patch(rect)
sample_ax.annotate(
f"{int(box[0])}",
xy=(box[1] + box[3] / 2, box[2] + box[4]),
xycoords="data",
weight="bold",
xytext=(-5, -10),
textcoords="offset points",
size=ANNOT_SIZE,
color="black",
)
sample_ax.axis("off")
# hide tick and tick label of the big axes
plt.axis("off")
plt.grid(False)
plt.savefig(save_dir / filename, bbox_inches="tight")
plt.close()
def draw_result(
output_dir, dataset, tracker_name, result_dir, is_algorithm=True, duration=None,
):
if is_algorithm:
show = ["weight", "frame"]
else:
show = ["frame"]
for seq in dataset:
dataset_name = seq.seq_info["dataset_name"]
seq_name = seq.seq_info["seq_name"]
tracker_reader = ReadResult(output_dir, dataset_name, tracker_name, seq_name)
if is_algorithm:
dataset_dir = output_dir / dataset_name / tracker_name
weight_path = dataset_dir / f"{seq_name}_weight.txt"
weights = pd.read_csv(weight_path, header=None).set_index(0)
save_dir = result_dir / dataset_name / tracker_name / seq_name
os.makedirs(save_dir, exist_ok=True)
for frame_idx, (img_path, dets, gts) in enumerate(seq):
filename = os.path.basename(img_path)
im = Image.open(img_path).convert("RGB")
cond = [drawing in show for drawing in ["weight", "frame"]]
ratios = [1 if i != 1 else 3 for i in range(len(cond)) if cond[i]]
fig, axes = plt.subplots(
nrows=sum(cond), ncols=1, gridspec_kw={"height_ratios": ratios}
)
fig.add_subplot(111, frameon=False)
i = 0
if cond[0]:
weight_ax = axes[i] if len(ratios) > 1 else axes
i += 1
if cond[1]:
sample_ax = axes[i] if len(ratios) > 1 else axes
# draw weight graph
if cond[0]:
for i in weights.columns:
weight = weights.loc[: frame_idx + 1][i]
weight_ax.plot(range(len(weight)), weight)
weight_ax.set(
ylabel="Weight", xlim=(0, len(seq)), ylim=(-0.05, 1.05,),
)
weight_ax.set_xticks([])
# draw anchor line
for i in range(frame_idx):
if (i + 1) % duration == 0:
weight_ax.axvline(
x=i, color="gray", linestyle="--", linewidth=1
)
# draw frame
if cond[1]:
sample_ax.imshow(np.asarray(im), aspect="auto")
bboxes = tracker_reader.get_result_by_frame(frame_idx)
# draw tracking bbox
for i in range(len(bboxes)):
box = bboxes[i]
rect = patches.Rectangle(
(box[1], box[2]),
box[3],
box[4],
linewidth=LINEWIDTH,
facecolor="none",
alpha=1,
edgecolor="red",
)
sample_ax.add_patch(rect)
sample_ax.annotate(
f"{int(box[0])}",
xy=(box[1] + box[3] / 2, box[2]),
xycoords="data",
weight="bold",
xytext=(-5, 5),
textcoords="offset points",
size=ANNOT_SIZE,
color="red",
)
# draw ground truth
for i in range(len(gts)):
if gts[i, 7] != 1:
continue
box = gts[i, 1:6]
rect = patches.Rectangle(
(box[1], box[2]),
box[3],
box[4],
linewidth=LINEWIDTH,
edgecolor="black",
facecolor="none",
alpha=1,
)
sample_ax.add_patch(rect)
sample_ax.annotate(
f"{int(box[0])}",
xy=(box[1] + box[3] / 2, box[2] + box[4]),
xycoords="data",
weight="bold",
xytext=(-5, -10),
textcoords="offset points",
size=ANNOT_SIZE,
color="black",
)
sample_ax.axis("off")
# hide tick and tick label of the big axes
plt.axis("off")
plt.grid(False)
plt.subplots_adjust(wspace=0, hspace=0.1 if len(ratios) > 1 else 0)
plt.savefig(save_dir / filename, bbox_inches="tight")
plt.close()
def draw_all_result(output_dir, dataset, trackers_name, result_dir, colors, duration):
for seq in dataset:
dataset_name = seq.seq_info["dataset_name"]
seq_name = seq.seq_info["seq_name"]
trackers_reader = [
ReadResult(output_dir, dataset_name, tracker_name, seq_name)
for tracker_name in trackers_name
]
dataset_dir = output_dir / dataset_name / trackers_name[0]
weight_path = dataset_dir / f"{seq_name}_weight.txt"
weights = | pd.read_csv(weight_path, header=None) | pandas.read_csv |
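# --- Hedged sketch of the weight-curve step used in draw_result above ---------
# Each algorithm is assumed to write "<seq>_weight.txt" with the frame index in
# column 0 and one weight per expert in the remaining columns (this layout is
# inferred from the read_csv(...).set_index(0) call above). The helper below
# isolates the reading/plotting of those weights up to a given frame.
import pandas as pd
import matplotlib.pyplot as plt
def plot_expert_weights(weight_path, frame_idx, ax=None):
    weights = pd.read_csv(weight_path, header=None).set_index(0)
    if ax is None:
        ax = plt.gca()
    for col in weights.columns:
        w = weights.loc[: frame_idx + 1][col]
        ax.plot(range(len(w)), w, label=str(col))
    ax.set_ylabel("Weight")
    ax.set_ylim(-0.05, 1.05)
    return ax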
import datetime
import string
import matplotlib.dates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from nltk import WordNetLemmatizer, LancasterStemmer, pos_tag, sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer
from pandas._libs.tslibs.offsets import BDay
from sklearn import tree
from sklearn.calibration import calibration_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, LinearSVC
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, plot_tree
from textblob import TextBlob
from wordcloud import WordCloud
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ =\
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
axes[0].grid()
axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes[0].legend(loc="best")
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, 'o-')
axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std, alpha=0.1)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
axes[2].grid()
axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
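# --- Example usage of plot_learning_curve (illustration only) -----------------
# The random data and the GaussianNB estimator below are assumptions made purely
# for demonstration; any fitted sklearn estimator plus X/y from the real pipeline
# would work the same way.
def demo_learning_curve():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = (X[:, 0] + X[:, 1] > 1).astype(int)
    plot_learning_curve(GaussianNB(), "Naive Bayes learning curve", X, y, cv=5)
    plt.savefig('learning_curve_demo.png')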
def create_word_cloud(text, type):
print('\nCreating word cloud...')
word_cloud = WordCloud(width=1024, height=1024, margin=0).generate(text)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(word_cloud, interpolation='bilinear')
ax.axis("off")
ax.margins(x=0, y=0)
plt.savefig(f'wordcloud_{type}.png')
def get_stop_words(tokens):
stop_word_tokens = []
for word in tokens:
if word.startswith('//t.co/') or word.startswith('http') or word in ['RT', 'http', 'rt', 'timestamp',
'.', '[video]', 'AMP', 'and', 'at',
'for', 'from', 'the', 'this', 'is',
'it', 'jul', 'of', 'on', 'to', 'in',
'with', 2018, 'FALSE', '2018', 'amp',
'you', 'by', False, 0, 7, 12, 15,
'0', '7', '12', '15', 'inc']:
continue
elif word not in stopwords.words('english') or word not in ['RT', 'http', 'rt', 'timestamp', '.', '[video]']:
stop_word_tokens.append(word)
sentence = ' '.join(stop_word_tokens)
return sentence
def get_lemma(tokens):
lemma = WordNetLemmatizer()
lemmatized_tokens = []
for token in tokens:
temp_tokens = lemma.lemmatize(token)
lemmatized_tokens.append(temp_tokens)
return get_stop_words(lemmatized_tokens)
def get_stems(tokens):
stemmer = LancasterStemmer()
stemmed_tokens = []
for token in tokens:
for word in token:
if word[1] == 'DT' or word[1] == 'PRP' or word[1] == 'PRP$' or word[1] == 'NN' or word[1] == 'NNP' or word[1] == 'NNPS':
temp_tokens = word[0]
else:
temp_tokens = stemmer.stem(word[0])
stemmed_tokens.append(temp_tokens)
return get_lemma(stemmed_tokens)
def get_pos_tag(tokens):
pos_tokens = [pos_tag(token) for token in tokens]
return get_stems(pos_tokens)
def get_tokens(document):
sequences = sent_tokenize(document)
seq_tokens = [word_tokenize(sequence) for sequence in sequences]
no_punctuation_seq_tokens = []
for seq_token in seq_tokens:
no_punctuation_seq_tokens.append([token for token in seq_token if token not in string.punctuation])
return get_pos_tag(no_punctuation_seq_tokens)
def get_num_words(s):
return len(s.split())
def append_col(train_data):
print('\nGetting number of words in new text cells...')
word_counts = []
for index, row in train_data.iterrows():
word_counts.append(get_num_words(row['new_text']))
train_data['new_text_count'] = word_counts
return train_data
def get_bigrams(train_data):
print("\nCalculating the bigrams...")
bigram_vectorizer = CountVectorizer(ngram_range=[2, 2])
x = bigram_vectorizer.fit_transform(train_data.text)
bigram_total = bigram_vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
bigrams = pd.DataFrame(mat.todense(), index=train_data.index, columns=bigram_vectorizer.get_feature_names())
train_data = pd.concat([train_data, bigrams], ignore_index=False, sort=False, axis=1, join="inner")
return len(bigram_total), train_data
def get_trigrams(train_data):
print("\nCalculating the trigrams...")
trigram_vectorizer = CountVectorizer(ngram_range=[3, 3])
x = trigram_vectorizer.fit_transform(train_data.text)
trigram_total = trigram_vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
trigram = pd.DataFrame(mat.todense(), index=train_data.index, columns=trigram_vectorizer.get_feature_names())
train_data = pd.concat([train_data, trigram], ignore_index=False, sort=False, axis=1, join="inner")
return len(trigram_total), train_data
def get_bag_of_words(train_data, features, name, type):
print("\nCalculating the bag of words...")
vectorizer = CountVectorizer(max_features=features, stop_words='english')
x = vectorizer.fit_transform(train_data.text)
words = vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
bow = pd.DataFrame(mat.todense(), index=train_data.index, columns=vectorizer.get_feature_names())
train_data = pd.concat([train_data, bow], ignore_index=False, sort=False, axis=1, join="inner")
df_total = train_data.drop(['text'], axis=1)
train_data.to_csv(f'df_{type}_{name}_total.csv')
return train_data
def plot_ngrams(ngrams):
print('\nPlotting ngrams...')
fig = plt.figure()
ax = plt.axes()
x = ['unigram', 'bigram', 'trigram']
ax.plot(x, ngrams)
ax.set_title('Number of ngrams in Stockerbot Dataset')
plt.savefig('ngrams.png')
def concat_date_time(train_data):
train_data['timestamp'] = train_data['date'].str.cat(train_data['time'], sep=' ')
return train_data
def get_vader_polarity(document):
vader = SentimentIntensityAnalyzer()
score = vader.polarity_scores(document)
return list(score.values())
def split_vader_polarity(train_data):
print('\nSplitting Vader sentiment dictionary into separate columns...')
nvs = []
Nvs = []
pvs = []
cvs = []
for v in train_data.iloc[:, 19]:
nvs.append(v[0])
Nvs.append(v[1])
pvs.append(v[2])
cvs.append(v[3])
train_data['negative_vader_score'] = nvs
train_data['neutral_vader_score'] = Nvs
train_data['positive_vader_score'] = pvs
train_data['compound_vader_score'] = cvs
return train_data
def get_textblob_polarity(document):
return TextBlob(document).sentiment.polarity
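# --- Small illustration of the two polarity scorers defined above -------------
# get_vader_polarity returns [neg, neu, pos, compound] (the order unpacked by
# split_vader_polarity), while get_textblob_polarity returns a single float in
# [-1, 1]. The sample sentence is an arbitrary assumption.
def demo_sentiment_scores():
    text = 'The stock rallied strongly after earnings beat expectations.'
    neg, neu, pos, compound = get_vader_polarity(text)
    tb_polarity = get_textblob_polarity(text)
    return compound, tb_polarity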
def get_decision_tree_regression(name, file):
print(f'Conducting decision tree regression on {name}\'s {file} file...')
train_data = pd.read_csv(f'df_{file}_{name}_total.csv')
train_data = train_data.drop(['Unnamed: 0'], axis=1)
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sort_values(by=['final_scores'])
X = train_data.iloc[:, 2:3].values.astype(float)
y = train_data.iloc[:, 3:4].values.astype(float)
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
print(f'\n{name} training set after standard scaling:')
print(X.shape, y.shape)
regr_1 = DecisionTreeRegressor(max_depth=2, max_features='auto')
regr_2 = DecisionTreeRegressor(max_depth=5, max_features='auto')
regr_1.fit(X, y)
regr_2.fit(X, y)
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
plt.figure()
plt.scatter(X, y, s=20, edgecolor='black',
c='darkorange', label='data')
plt.plot(X_test, y_1, color='cornflowerblue',
label='max_depth=2', linewidth=2)
plt.plot(X_test, y_2, color='yellowgreen', label='max_depth=5', linewidth=2)
plt.xlabel('data')
plt.ylabel('target')
plt.title(f'{name} Decision Tree Regression ({file})')
plt.legend()
plt.savefig(f'{file}_{name}_dtr.png')
return train_data
def get_comparison_calibration_classifiers(name1, file):
print(f'Conducting a comparison of calibration classifiers on {name1}\'s {file} file...')
train_data = pd.read_csv(f'df_{file}_{name1}_total.csv')
train_data = train_data.drop(['Unnamed: 0'], axis=1)
train_data = train_data.drop(['date'], axis=1)
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sort_values(by=['2_SMA'])
X = train_data[['final_scores', '2_SMA', '5_SMA', '7_EMA']]
y = train_data[['sentiment']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.7)
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC()
rfc = RandomForestClassifier()
fig = plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train.values.ravel())
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else:
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name,))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.5, 1.5])
ax1.legend(loc="lower right")
ax1.set_title(f'{name1} Calibration plots (reliability curve)({file})')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.savefig(f'{file}_{name1}_ccc.png')
return train_data
def get_support_vector_regression(name, file):
print(f'Conducting support vector regression on {name}\'s {file} file...')
senti_data = pd.read_csv(f'df_{file}_{name}_total.csv')
stock_data = pd.read_csv(f'df_stock_{name}.csv')
stocks = stock_data[['date', '2_SMA', '5_SMA', '7_EMA']].copy()
train_data = senti_data[['date', 'sentiment', 'final_scores']].copy()
new = train_data['date'].str.split(' ', n=1, expand=True)
train_data['date'] = new[0]
train_data = pd.merge(train_data, stocks, on=['date', 'date'], how='left', sort=False)
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.fillna(method='ffill')
train_data = train_data.fillna(value=0)
train_data = train_data.sort_values(by=['final_scores'])
X = train_data.iloc[:, 2:3].values.astype(float)
y = train_data.iloc[:, 3:4].values.astype(float)
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
print(f'\n{name} training set after standard scaling:')
print(X.shape, y.shape)
svr_rbf = SVR(kernel='rbf', C=10000, gamma=0.1, epsilon=.1)
svr_lin = SVR(kernel='linear', C=10000, gamma='auto')
svr_poly = SVR(kernel='poly', C=10000, gamma='auto', degree=3, epsilon=.1,
coef0=1)
lw = 2
svrs = [svr_rbf, svr_lin, svr_poly]
kernel_label = ['RBF', 'Linear', 'Polynomial']
model_color = ['m', 'c', 'g']
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 10), sharey=True)
for ix, svr in enumerate(svrs):
axes[ix].plot(X, svr.fit(X, y).predict(X), color=model_color[ix], lw=lw,
label='{} model'.format(kernel_label[ix]))
axes[ix].scatter(X[svr.support_], y[svr.support_], facecolor="none",
edgecolor=model_color[ix], s=50,
label='{} support vectors'.format(kernel_label[ix]))
axes[ix].scatter(X[np.setdiff1d(np.arange(len(X)), svr.support_)],
y[np.setdiff1d(np.arange(len(X)), svr.support_)],
facecolor="none", edgecolor="k", s=50,
label='other training data')
axes[ix].legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
ncol=1, fancybox=True, shadow=True)
fig.text(0.5, 0.04, 'data', ha='center', va='center')
fig.text(0.06, 0.5, 'target', ha='center', va='center', rotation='vertical')
fig.suptitle(f'{name} Support Vector Regression ({file})', fontsize=14)
plt.savefig(f'{file}_{name}_swr.png')
train_data.to_csv(f'df_{file}_{name}_total.csv')
return train_data
def get_decision_tree_classifier(train_data, name, file):
print(f'Creating decision tree classifiers on {name}\'s {file} file...')
train_data = train_data.drop(['date'], axis=1)
train_data = train_data.drop(['trading_time'], axis=1)
train_data = train_data.drop(['source'], axis=1)
train_data = train_data.drop(['text'], axis=1)
sentiment = train_data.pop('sentiment')
train_data.insert(0, 'sentiment', sentiment)
y = train_data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(train_data, y, test_size=0.33)
dtc = DecisionTreeClassifier(criterion='entropy', max_features='auto', max_depth=5, random_state=0)
print("Decision Tree classifier")
pred = dtc.fit(X_train, y_train)
predictions = pred.predict(X_test)
text_representation = tree.export_text(dtc)
with open(f'decision_tree_{file}_{name}.log', 'w') as fout:
fout.write(text_representation)
feature_names = list(train_data.columns.values)
fig = plt.figure(figsize=(15, 10))
plot_tree(dtc,
feature_names=feature_names,
class_names=["FALSE", "TRUE"],
filled=True,
fontsize=12)
plt.title(f'{file} Decision Tree for {name}')
plt.savefig(f'decision_tree_{file}_{name}.png')
fig = plt.figure(figsize=(15, 10))
con_mat = confusion_matrix(y_true=y_test, y_pred=predictions)
group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
group_counts = ['{0: 0.0f}'.format(value) for value in con_mat.flatten()]
group_percentages = ['{0: .2f}'.format(value) for value in con_mat.flatten() / np.sum(con_mat)]
labels = [f'{v1}\n{v2}\n{v3}' for v1, v2, v3 in zip(group_names, group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(con_mat, annot=labels, fmt='', cmap='Blues')
plt.title(f'{file} Confusion Matrix for {name}')
plt.savefig(f'confusion_matrix_{file}_{name}.png')
fig = plt.figure(figsize=(15, 10))
class_rpt = pd.DataFrame(classification_report(predictions, y_test, digits=2, output_dict=True))
class_rpt.style.background_gradient(cmap='newcmp', subset=pd.IndexSlice['0':'9', :'f1-score']).set_properties(
**{'text-align': 'center', 'font-size': '30px'})
sns.heatmap(class_rpt.iloc[:-1, :].T, annot=True)
plt.title(f'{file} Classification Report for {name}')
plt.savefig(f'classification_report_{file}_{name}.png')
def combine_stock_sentiments(name, code):
print('\nCombining extreme and blob data frames back with train_data for regressions...')
train_data = pd.read_csv(f'stockerbot_cleaned.csv')
if code == 0:
df_extreme = pd.read_csv(f'df_extreme_vader_{name}.csv')
df_extreme['date'] = pd.to_datetime(df_extreme['date'])
type = 'vader'
elif code == 1:
df_extreme = pd.read_csv(f'df_extreme_blob_{name}.csv')
df_extreme['date'] = pd.to_datetime(df_extreme['date'])
type = 'blob'
train_data['date'] = pd.to_datetime(train_data['date'] + ' ' + train_data['time'])
df_total = pd.merge(df_extreme, train_data, on=['date', 'date'], how='left',
sort=False, suffixes=('_v', '_b'))
df_total = df_total.drop(['Unnamed: 0_v'], axis=1)
df_total = df_total.drop(['Unnamed: 0_b'], axis=1)
df_total = df_total.drop(['Date'], axis=1)
df_total = df_total.drop(['time'], axis=1)
df_total = df_total.drop(['fb'], axis=1)
df_total = df_total.drop(['aapl'], axis=1)
df_total = df_total.drop(['amzn'], axis=1)
df_total = df_total.drop(['nflx'], axis=1)
df_total = df_total.drop(['googl'], axis=1)
df_total = df_total.drop(['vader_sentiment'], axis=1)
df_total = df_total.drop(['negative_vader_score'], axis=1)
df_total = df_total.drop(['neutral_vader_score'], axis=1)
df_total = df_total.drop(['positive_vader_score'], axis=1)
df_total = df_total.drop(['compound_vader_score'], axis=1)
df_total = df_total.drop(['tb_sentiment'], axis=1)
df_total = df_total.drop(['verified'], axis=1)
df_total = df_total.drop(['faang'], axis=1)
df_total = df_total.drop(['timestamp'], axis=1)
df_total = df_total.drop(['above_mean'], axis=1)
df_total['sentiment'] = np.where(df_total['final_scores'] > 0, 0, 1)
df_total = df_total.fillna(value=0)
df_total.to_csv(f'df_{type}_{name}_total.csv')
return df_total
def get_trade_open(date):
curr_date_open = pd.to_datetime(date).floor('d').replace(hour=13, minute=30) - BDay(0)
curr_date_close = | pd.to_datetime(date) | pandas.to_datetime |
__author__ = "<NAME>, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__license__ = "MIT"
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pandas as pd
from matplotlib.ticker import NullFormatter
from idf_analysis import IntensityDurationFrequencyAnalyse
from idf_analysis.definitions import COL
from idf_analysis.little_helpers import duration_steps_readable, minutes_readable, frame_looper, event_caption
from idf_analysis.sww_utils import (guess_freq, rain_events, event_duration, resample_rain_series, rain_bar_plot,
agg_events, )
COL.MAX_SRI = 'max_SRI_{}'
COL.MAX_SRI_DURATION = 'max_SRI_duration_{}'
####################################################################################################################
def grisa_factor(tn):
"""
calculates the grisa-factor according to Grisa's formula
Args:
tn (float): in [years]
Returns:
float: factor
"""
return 1 + (np.log(tn) / np.log(2))
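# Quick worked check of the formula above (illustration only): the index grows by
# one every time the return period doubles, e.g. grisa_factor(1) = 1,
# grisa_factor(2) = 1 + ln(2)/ln(2) = 2, grisa_factor(4) = 3.
def _check_grisa_factor():
    assert np.isclose(grisa_factor(1), 1.0)
    assert np.isclose(grisa_factor(2), 2.0)
    assert np.isclose(grisa_factor(4), 3.0)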
def next_bigger(v, l):
return l[next(x for x, val in enumerate(l) if val >= v)]
class SCHMITT:
# Zuweisung nach Schmitt des SRI über der Wiederkehrperiode
SRI_TN = {
1: 1,
2: 1,
3: 2,
5: 2,
10: 3,
20: 4,
25: 4,
30: 5,
50: 6,
75: 6,
100: 7
}
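    # e.g. a design rainfall with a 20-year return period maps to SRI 4;
    # return periods above 100 a are handled via the MULTI_FACTOR bounds below (see get_sri)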
    # increase factors according to Schmitt for SRI 8, 9, 10, 11 and 12, based on SRI 7
    # (lower and upper bound)
MULTI_FACTOR = {
8: (1.2, 1.39),
9: (1.4, 1.59),
10: (1.6, 2.19),
11: (2.2, 2.78),
12: (2.8, 2.8),
}
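    # e.g. a depth of roughly 1.4 to 1.6 times the 100-year depth of the same duration maps to SRI 10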
VERBAL = {
(1, 2): 'Starkregen',
(3, 5): 'intensiver Starkregen',
(6, 7): 'außergewöhnlicher Starkregen',
(8, 12): 'extremer Starkregen'
}
INDICES_COLOR = {1: (0.69, 0.9, 0.1),
2: (0.8, 1, 0.6),
3: (0.9, 1, 0.3),
4: (1, 0.96, 0),
5: (1, 0.63, 0),
6: (1, 0.34, 0),
7: (1, 0.16, 0),
8: (0.97, 0.12, 0.24),
9: (1, 0.10, 0.39),
10: (0.97, 0.03, 0.51),
11: (0.92, 0.08, 0.75),
12: (0.66, 0.11, 0.86)}
INDICES_COLOR_RGB = {1: (176, 230, 25),
2: (204, 255, 153),
3: (230, 255, 77),
4: (255, 244, 0),
5: (255, 160, 0),
6: (255, 86, 0),
7: (255, 40, 0),
8: (247, 30, 61),
9: (255, 26, 99),
10: (247, 9, 130),
11: (235, 21, 191),
12: (189, 28, 220)}
INDICES_COLOR_HEX = {1: "#b0e619",
2: "#ccff99",
3: "#e6ff4d",
4: "#fff400",
5: "#ffa000",
6: "#ff5600",
7: "#ff2800",
8: "#f71e3d",
9: "#ff1a63",
10: "#f70982",
11: "#eb15bf",
12: "#bd1cdc"}
krueger_pfister_verbal = {
(1, 4): 'moderat',
(5, 7): 'stark',
(8, 10): 'heftig',
(11, 12): 'extrem'
}
grisa_verbal = {
(1, 2): 'Minor',
(3, 4): 'Moderate',
(5, 6): 'Major',
(7, 8): 'Extreme',
(9, 10): 'Catastrophic'
}
def cat_dict(cat):
res = {}
for num_range, verbal in cat.items():
for i in range(num_range[0], num_range[1]+1):
res[i] = verbal
return res
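# e.g. cat_dict(SCHMITT.VERBAL)[4] -> 'intensiver Starkregen' (expands the range keys to one entry per index)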
####################################################################################################################
class HeavyRainfallIndexAnalyse(IntensityDurationFrequencyAnalyse):
indices = list(range(1, 13))
class METHODS:
SCHMITT = 'Schmitt'
KRUEGER_PFISTER = 'KruegerPfister'
MUDERSBACH = 'Mudersbach'
@classmethod
def all(cls):
return cls.SCHMITT, cls.KRUEGER_PFISTER, cls.MUDERSBACH
indices_color = SCHMITT.INDICES_COLOR
def __init__(self, *args, method=METHODS.SCHMITT, **kwargs):
IntensityDurationFrequencyAnalyse.__init__(self, *args, **kwargs)
self.method = method
self._sri_frame = None
def set_series(self, series):
IntensityDurationFrequencyAnalyse.set_series(self, series)
self._sri_frame = None
def get_sri(self, height_of_rainfall, duration):
"""
        calculate the heavy rain index (StarkRegenIndex) for a given rainfall depth and duration
Args:
height_of_rainfall (float): in [mm]
duration (int | float | list | numpy.ndarray | pandas.Series): in minutes
Returns:
int | float | list | numpy.ndarray | pandas.Series: heavy rain index
"""
tn = self.get_return_period(height_of_rainfall, duration)
if self.method == self.METHODS.MUDERSBACH:
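            # Mudersbach: SRI = round(1.5*ln(Tn) + 0.4*ln(D)); e.g. Tn = 10 a and D = 60 min
            # give round(3.45 + 1.64) = 5; values are clipped to 1 below Tn = 1 a and to 12 above Tn = 100 a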
if isinstance(tn, (pd.Series, np.ndarray)):
sri = np.round(1.5 * np.log(tn) + 0.4 * np.log(duration), 0)
sri[tn <= 1] = 1
sri[tn >= 100] = 12
return sri
else:
if tn <= 1:
return 1
elif tn >= 100:
return 12
else:
return np.round(1.5 * np.log(tn) + 0.4 * np.log(duration), 0)
elif self.method == self.METHODS.SCHMITT:
if isinstance(tn, (pd.Series, np.ndarray)):
breaks = [-np.inf] + list(SCHMITT.SRI_TN.keys()) + [np.inf]
d = dict(zip(range(11), SCHMITT.SRI_TN.values()))
sri = pd.cut(tn, breaks, labels=False).replace(d)
over_100 = tn > 100
hn_100 = self.depth_of_rainfall(duration, 100)
breaks2 = [1] + [f[0] for f in SCHMITT.MULTI_FACTOR.values()][1:] + [np.inf]
d2 = dict(zip(range(len(breaks2) - 1), range(8, 13)))
sri.loc[over_100] = pd.cut(height_of_rainfall.loc[over_100] / hn_100, breaks2, labels=False).replace(d2)
else:
if tn >= 100:
hn_100 = self.depth_of_rainfall(duration, 100)
for sri, mul in SCHMITT.MULTI_FACTOR.items():
if height_of_rainfall <= hn_100 * mul[0]:
break
else:
sri = SCHMITT.SRI_TN[next_bigger(tn, list(SCHMITT.SRI_TN.keys()))]
elif self.method == self.METHODS.KRUEGER_PFISTER:
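            # Krueger/Pfister: scale the Grisa factor by a duration adjustment (h / h_24h)
            # and an intensity adjustment (h / h_100a), then ceil and clip the result to 0..12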
h_24h = self.depth_of_rainfall(duration=24 * 60, return_period=tn)
hn_100 = self.depth_of_rainfall(duration=duration, return_period=100)
duration_adjustment_factor = height_of_rainfall / h_24h
intensity_adjustment_factor = height_of_rainfall / hn_100
sri = grisa_factor(tn) * duration_adjustment_factor * intensity_adjustment_factor
if isinstance(sri, (pd.Series, np.ndarray)):
sri[tn < 0.5] = 0
else:
if tn < 0.5:
return 0
return np.clip(np.ceil(sri), 0, 12)
else:
raise NotImplementedError(f'Method {self.method} not implemented!')
return sri
# __________________________________________________________________________________________________________________
def result_sri_table(self, durations=None):
"""
get a standard idf table of rainfall depth with return periods as columns and durations as rows
Args:
durations (list | numpy.ndarray | None): list of durations in minutes for the table
Returns:
pandas.DataFrame: idf table
"""
idf_table = self.result_table(durations)
if self.method == self.METHODS.SCHMITT:
sri_table = idf_table.rename(columns=SCHMITT.SRI_TN)
for sri, mul in SCHMITT.MULTI_FACTOR.items():
sri_table[sri] = mul[1] * sri_table[7]
sri_table = sri_table.loc[:, ~sri_table.columns.duplicated('last')]
elif self.method == self.METHODS.MUDERSBACH:
            # first, build a table of the return periods
rp_table = pd.DataFrame(index=idf_table.index, columns=range(1, 13))
            # dependence on the duration step
a = np.log(rp_table.index.values) * 0.4
for sri in rp_table.columns:
rp_table[sri] = np.exp((sri + 0.5 - a) / 1.5)
rp_table.loc[:, 1] = 1
# rp_table.loc[:, 12] = 100
            # then calculate the rainfall depth from the duration step and the return period
sri_table = rp_table.round(1).copy()
for dur in rp_table.index:
sri_table.loc[dur] = self.depth_of_rainfall(dur, rp_table.loc[dur])
            # extrapolation beyond this range is presumably not very sound
sri_table[rp_table >= 100] = np.NaN
# sri_table.loc[:12] = self.depth_of_rainfall(sri_table.index.values, 100)
sri_table[rp_table < 1] = np.NaN
sri_table = sri_table.astype(float).round(2)
sri_table = sri_table.fillna(method='ffill', axis=1, limit=None)
elif self.method == self.METHODS.KRUEGER_PFISTER:
# duration_adjustment_factor = idf_table.div(idf_table.loc[24 * 60])
# intensity_adjustment_factor = idf_table.div(idf_table[100].values, axis=0)
# sri_table = grisa_factor(
# idf_table.columns.values) * duration_adjustment_factor * intensity_adjustment_factor
# sri_table = sri_table.round().astype(int).clip(0,12)
sri_table = pd.DataFrame(index=idf_table.index)
sri_vector = (idf_table.loc[1440, 100] * idf_table.loc[:, 100]) / (1 + (np.log(100) / np.log(2)))
for i in self.indices:
sri_table[i] = np.sqrt(i * sri_vector)
else:
            raise NotImplementedError(f'Method "{self.method}" not implemented! '
                                      f'Please contact the developer to request its implementation.')
sri_table.index.name = 'duration in min'
sri_table.columns.name = 'SRI'
return sri_table
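    # Illustrative use (constructor arguments of the parent IDF analysis are assumed):
    #     hria = HeavyRainfallIndexAnalyse(..., method=HeavyRainfallIndexAnalyse.METHODS.MUDERSBACH)
    #     hria.result_sri_table(durations=[15, 60, 1440])  # rainfall depth in mm per duration and SRI class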
def interim_sri_table(self, durations=None):
"""M
get a table of SRI with return periods as columns and durations as rows
Args:
durations (list | numpy.ndarray): list of durations in minutes for the table
return_periods (list): list of return periods in years for the table
Returns:
pandas.DataFrame: idf table
"""
idf_table = self.result_table(durations)
sri_table = pd.DataFrame(index=idf_table.index, columns=idf_table.columns)
if self.method == self.METHODS.SCHMITT:
for col in sri_table:
sri_table[col] = SCHMITT.SRI_TN[col]
elif self.method == self.METHODS.MUDERSBACH:
sri_table[1] = 1
a = np.log(sri_table.index.values) * 0.4
for tn in [2, 3, 5, 10, 20, 25, 30, 50, 75, 100]:
sri_table[tn] = a + np.log(tn) * 1.5
sri_table = sri_table.round().astype(int)
elif self.method == self.METHODS.KRUEGER_PFISTER:
duration_adjustment_factor = idf_table.div(idf_table.loc[24 * 60])
intensity_adjustment_factor = idf_table.div(idf_table[100].values, axis=0)
sri_table = grisa_factor(idf_table.columns.values) * duration_adjustment_factor * intensity_adjustment_factor
sri_table = sri_table.round().astype(int).clip(0,12)
else:
raise NotImplementedError(f'Method {self.method} not implemented!')
sri_table.index.name = 'duration in min'
sri_table.columns.name = 'Return Period in a'
return sri_table
####################################################################################################################
def result_sri_figure(self, duration_steps=None, ax=None, grid=True):
"""
SRI curves are generated depending on the selected procedure for SRI generation.
Args:
duration_steps (list | numpy.ndarray): list of durations in minutes for the table
ax (plt.Axes): if plot is a subplot give the axes
grid (bool): if to make a grid
Returns:
(matplotlib.pyplot.Figure, matplotlib.pyplot.Axes): figure and axes of the plot
"""
sri_table = self.result_sri_table(durations=duration_steps)
ax = sri_table.plot(color=self.indices_color, logx=True, legend=True, ax=ax)
ax.tick_params(axis='both', which='both', direction='out')
ax.set_xlabel('Duration D')
ax.set_ylabel('Rainfall h$\\mathsf{_N}$ in mm')
# ax.set_xlabel('Dauerstufe D')
# ax.set_ylabel('Regenhöhe h$\\mathsf{_N}$ in mm')
# ax.set_title('Starkregenindex - Kurven', fontweight='bold')
ax.set_xticks([], minor=True)
ax.set_xticks(sri_table.index)
ax.set_xlim(*sri_table.index.values[[0, -1]])
ax.set_xticklabels(duration_steps_readable(sri_table.index))
ax.set_facecolor('w')
if grid:
ax.grid(color='grey', linestyle='-', linewidth=0.3)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], bbox_to_anchor=(1.02, 1.), loc='upper left',
borderaxespad=0., title='SRI')
fig = ax.get_figure()
# cm_to_inch = 2.54
# fig.set_size_inches(h=11 / cm_to_inch, w=25 / cm_to_inch) # (11.69, 8.27)
fig.tight_layout()
return fig, ax
####################################################################################################################
@property
def sri_frame(self):
"""
        get the SRI over the whole time-series for the default duration steps.

        Returns:
            pandas.DataFrame: data-frame of SRI values where the columns are the duration steps
"""
if self._sri_frame is None:
self._sri_frame = self.get_sri_frame()
return self._sri_frame
def get_sri_frame(self, series=None, durations=None):
"""
Args:
series (pandas.Series): precipitation time-series of the time range of interest i.e. of an event
durations (list): list of durations in minutes which are of interest (default: pre defined durations)
Returns:
pandas.DataFrame: index=date-time-index; columns=durations; values=SRI
"""
        # TODO: problems at low rainfall depths - the formula was not made for that!!
sums = self.get_rainfall_sum_frame(series=series, durations=durations)
df = pd.DataFrame(index=sums.index)
for d in frame_looper(sums.index.size, columns=sums.columns, label='sri'):
df[d] = self.get_sri(height_of_rainfall=sums[d][sums[d] > 0.1], duration=d)
return df#.round(1)
def add_max_sri_to_events(self, events, series=None):
"""M
maximum SRI is added to the table
Args:
events (list): list of rainfall events
Returns:
pandas.DataFrame: table
"""
if COL.MAX_SRI.format(self.method) not in events:
events[COL.MAX_SRI.format(self.method)] = None
events[COL.MAX_SRI_DURATION.format(self.method)] = None
rainfall_sum_frame = self.get_rainfall_sum_frame(series=series)
for event_no, event in events.iterrows():
s = self.get_event_sri_max(event[COL.START], event[COL.END], rainfall_sum_frame=rainfall_sum_frame)
events.loc[event_no, COL.MAX_SRI.format(self.method)] = s.max()
events.loc[event_no, COL.MAX_SRI_DURATION.format(self.method)] = s.idxmax()
def get_event_sri_max(self, start, end, rainfall_sum_frame=None):
"""M
maximum SRI is added to the table
Args:
events (list): list of rainfall events
Returns:
pandas.DataFrame: table
"""
if rainfall_sum_frame is None:
d = self.rainfall_sum_frame[start:end].max().to_dict()
else:
d = rainfall_sum_frame[start:end].max().to_dict()
sri = dict()
for dur, h in d.items():
sri[dur] = self.get_sri(h, dur)
return pd.Series(sri, name=self.method)
def sri_bar_axes(self, ax, sri_frame, durations=None):
""" create a bar axes for the sri event plot
Args:
ax (matplotlib.pyplot.Axes):
sri_frame (pandas.DataFrame): index=DatetimeIndex and columns=SRI
durations (list):
Returns:
matplotlib.pyplot.Axes:
"""
if durations is None:
durations = [5, 10, 15, 20, 30, 45, 60, 90, 120, 180, 240, 360, 540, 720, 1080, 1440, 2880, 4320]
# legend
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=self.indices_color[i], lw=4) for i in self.indices]
names = [str(i) for i in self.indices]
ax.legend(custom_lines, names, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(self.indices),
mode="expand", borderaxespad=0., title='StarkRegenIndex', handlelength=0.7)
duration_size = len(durations)
# labels for the y axis
durations_index = range(duration_size)
dh = 1
ax.set_yticks([i + dh / 2 for i in durations_index], minor=True)
ax.set_yticks(list(durations_index), minor=False)
ax.set_yticklabels(duration_steps_readable(durations), minor=True)
ax.set_yticklabels([''] * duration_size, minor=False)
ax.set_ylabel('duration of the design rainfall')
# for the relative start time
freq = guess_freq(sri_frame.index)
start_period = sri_frame.index[0].to_period(freq).ordinal
# idf_table.index = idf_table.index - idf_table.index[0]
min_duration = pd.Timedelta(minutes=1)
for hi, d in enumerate(sri_frame.columns):
sri = sri_frame[d]
for i in self.indices:
# not really a rain event, but the results are the same
tab = rain_events(sri, ignore_rain_below=i, min_gap=freq)
if tab.empty:
continue
if 1:
durations = (event_duration(tab) / min_duration).tolist()
rel_starts = ((tab[COL.START] - sri_frame.index[0]) / min_duration + start_period).tolist()
bar_x = list(zip(rel_starts, durations))
else:
tab[COL.DUR] = event_duration(tab) / min_duration
bar_x = [(r[COL.START] / min_duration + start_period, r[COL.DUR]) for _, r in tab.iterrows()]
ax.broken_barh(bar_x, (hi, dh), facecolors=self.indices_color[i])
ax.set_ylim(0, duration_size)
ax.set_xticklabels([])
ax.xaxis.set_major_formatter(NullFormatter())
ax.axhline(0, color='k')
ax.axhline(duration_size / 2, color='k')
return ax
@staticmethod
def event_plot_caption(event, method, unit='mm'):
"""
get a caption for the event
Args:
event (pd.Series | dict): statistics of the event
method (str): used method for HRI estimation (i.e. SCHMITT, MUDERSBACH, KRUEGER_PFISTER)
unit (str): unit of the observation
Returns:
str: caption for the plot
"""
caption = event_caption(event, unit) + '\n'
caption += f'The method used for the SRI calculation is: {method}.\n'
if COL.MAX_SRI.format(method) in event:
caption += f'The maximum SRI was {event[COL.MAX_SRI.format(method)]:0.2f}\n'
if COL.MAX_SRI_DURATION.format(method) in event:
caption += f'at a duration of {minutes_readable(event[COL.MAX_SRI_DURATION.format(method)])}.'
return caption
def event_plot_sri(self, event, durations=None, unit='mm', column_name='Precipitation'):
"""
get a plot of the selected event
Args:
event (pandas.Series): event
durations (list | numpy.ndarray): list of durations in minutes for the table
unit (str): unit of the observation
column_name (str): label of the observation
Returns:
(matplotlib.pyplot.Figure, matplotlib.pyplot.Axes): figure and axes of the plot
"""
event = event.to_dict()
start = event[COL.START]
end = event[COL.END]
plot_range = slice(start - pd.Timedelta(self._freq), end + pd.Timedelta(self._freq))
if durations:
max_dur = max(durations)
else:
max_dur = max(self.duration_steps)
sri_frame_extended = self.get_sri_frame(
            self.series[start - pd.Timedelta(minutes=max_dur):end],
            durations=durations)  # the slice end and the durations kwarg are assumed here
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
        assert not idx.equals(pd.Series(idx2))