| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
import os
import time
import scipy
import random
import pickle
import torch
import json
import numpy as np
import pandas as pd
from urllib import request
pd.set_option('display.width', 1000)
def adj_to_tensor(adj):
if type(adj) != scipy.sparse.coo.coo_matrix:
adj = adj.tocoo()
sparse_row = torch.LongTensor(adj.row).unsqueeze(1)
sparse_col = torch.LongTensor(adj.col).unsqueeze(1)
sparse_concat = torch.cat((sparse_row, sparse_col), 1)
sparse_data = torch.FloatTensor(adj.data)
adj_tensor = torch.sparse.FloatTensor(sparse_concat.t(), sparse_data, torch.Size(adj.shape))
return adj_tensor
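# Illustrative usage sketch (not part of the original source): converting a toy
# 3-node adjacency matrix to a torch sparse tensor with adj_to_tensor. The input
# below is a made-up example; any scipy sparse matrix exposing .tocoo() works.
#   toy_adj = scipy.sparse.csr_matrix(np.eye(3, dtype=np.float32))
#   toy_tensor = adj_to_tensor(toy_adj)  # sparse FloatTensor of shape torch.Size([3, 3])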
def adj_preprocess(adj, adj_norm_func=None, mask=None, model_type="torch", device='cpu'):
if adj_norm_func is not None:
adj = adj_norm_func(adj)
if model_type == "torch":
if type(adj) is tuple:
if mask is not None:
adj = [adj_to_tensor(adj_[mask][:, mask]).to(device) for adj_ in adj]
else:
adj = [adj_to_tensor(adj_).to(device) for adj_ in adj]
else:
if mask is not None:
adj = adj_to_tensor(adj[mask][:, mask]).to(device)
else:
adj = adj_to_tensor(adj).to(device)
elif model_type == "dgl":
if type(adj) is tuple:
if mask is not None:
adj = [adj_[mask][:, mask] for adj_ in adj]
else:
adj = [adj_ for adj_ in adj]
else:
if mask is not None:
adj = adj[mask][:, mask]
else:
adj = adj
return adj
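# Illustrative usage sketch (not part of the original source): restricting a toy
# 3-node adjacency to the first two nodes via `mask` before tensor conversion.
#   toy_adj = scipy.sparse.csr_matrix(np.ones((3, 3)))
#   node_mask = np.array([True, True, False])
#   sub_adj = adj_preprocess(toy_adj, mask=node_mask, model_type="torch")  # 2 x 2 sparse tensor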
def feat_preprocess(features, feat_norm=None, device='cpu'):
def feat_normalize(feat, norm=None):
if norm == "arctan":
feat = 2 * np.arctan(feat) / np.pi
elif norm == "tanh":
feat = np.tanh(feat)
else:
feat = feat
return feat
if type(features) != torch.Tensor:
features = torch.FloatTensor(features)
elif features.type() != 'torch.FloatTensor':
features = features.float()
if feat_norm is not None:
features = feat_normalize(features, norm=feat_norm)
features = features.to(device)
return features
def label_preprocess(labels, device='cpu'):
if type(labels) != torch.Tensor:
labels = torch.LongTensor(labels)
elif labels.type() != 'torch.LongTensor':
labels = labels.long()
labels = labels.to(device)
return labels
def fix_seed(seed=0):
"""
Fix random process by a seed.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_num_params(model):
return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
def save_features(features, file_dir, file_name='features.npy'):
if features is not None:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
np.save(os.path.join(file_dir, file_name), features.cpu().detach().numpy())
def save_adj(adj, file_dir, file_name='adj.pkl'):
if adj is not None:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
with open(os.path.join(file_dir, file_name), 'wb') as f:
pickle.dump(adj, f)
def save_model(model, save_dir, name, verbose=True):
if save_dir is None:
cur_time = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
save_dir = "./tmp_{}".format(cur_time)
os.makedirs(save_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(model.state_dict(), os.path.join(save_dir, name))
if verbose:
print("Model saved in '{}'.".format(os.path.join(save_dir, name)))
def get_index_induc(index_a, index_b):
i_a, i_b = 0, 0
l_a, l_b = len(index_a), len(index_b)
i_new = 0
index_a_new, index_b_new = [], []
while i_new < l_a + l_b:
if i_a == l_a:
while i_b < l_b:
i_b += 1
index_b_new.append(i_new)
i_new += 1
continue
elif i_b == l_b:
while i_a < l_a:
i_a += 1
index_a_new.append(i_new)
i_new += 1
continue
if index_a[i_a] < index_b[i_b]:
i_a += 1
index_a_new.append(i_new)
i_new += 1
else:
i_b += 1
index_b_new.append(i_new)
i_new += 1
return index_a_new, index_b_new
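# Worked example (illustrative): get_index_induc merges two disjoint, sorted index
# lists and returns each element's position in the combined ordering.
#   get_index_induc([0, 2, 5], [1, 3, 4])  ->  ([0, 2, 5], [1, 3, 4])
#   get_index_induc([10, 30], [20])        ->  ([0, 2], [1])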
def download(url, save_path):
print("Downloading from {}".format(url))
try:
data = request.urlopen(url)
except Exception as e:
print(e)
print("Failed to download the dataset.")
exit(1)
with open(save_path, "wb") as f:
f.write(data.read())
def save_dict_to_xlsx(result_dict, file_dir, file_name="result.xlsx", index=0, verbose=True):
if not os.path.exists(file_dir):
os.makedirs(file_dir)
df = pd.DataFrame(result_dict, index=[index])  # api: pandas.DataFrame
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)  # api: pandas.util.testing.assert_series_equal
from typing import Optional
import numpy as np
import pandas as pd
from pandas import DatetimeIndex
from stateful.representable import Representable
from stateful.storage.tree import DateTree
from stateful.utils import list_of_instance, cast_output
from pandas.api.types import infer_dtype
class Stream(Representable):
def __init__(self, name, configuration: dict = None, dtype=None, tree: Optional[DateTree] = None):
Representable.__init__(self)
self.name = name
self.dtype = dtype if dtype else configuration.get("dtype")
self._tree = tree
self.configuration = configuration if configuration else {}
@property
def length(self):
return len(self._tree) if self._tree else None
@property
def tree(self):
if self._tree is None:
self._tree = DateTree(self.name, self.dtype)
return self._tree
@property
def interpolation(self):
return self.tree.interpolation
@property
def empty(self):
return not (self.tree is not None and not self.tree.empty)
@property
def start(self):
return self.tree.start
@property
def end(self):
return self.tree.end
@property
def first(self):
return self.tree.first
@property
def last(self):
return self.tree.last
def set_name(self, name):
self.name = name
self._tree.name = name
def ceil(self, date):
return self.tree.ceil(date)
def floor(self, date):
return self.tree.floor(date)
def head(self, n=5) -> pd.DataFrame:
if self.empty:
return pd.DataFrame()
index, values = [], []
iterator = iter(self._tree)
while len(index) < n:
idx, value = next(iterator)
index.append(idx)
values.append(value)
if list_of_instance(values, dict):
return pd.DataFrame(values, index=index)
else:
name = self.name if self.name else "values"
columns = [{name: v} for v in values]
return pd.DataFrame(columns, index=index)
def df(self) -> pd.DataFrame:
if self.empty:
return pd.DataFrame()
index, values = zip(*list(self._tree))
if list_of_instance(values, dict):
return pd.DataFrame(values, index=index)
else:
return pd.DataFrame({self.name: values}, index=index)
def alias(self, name):
return Stream(name, self.configuration, self.dtype, self._tree.alias(name))
def values(self):
return self.tree.values()
def dates(self):
return self.tree.dates()
def on(self, on=True) -> None:
self._tree.on(on)
def within(self, date) -> bool:
return self.tree.within(date)
def get(self, date, cast=True):
if cast:
return cast_output(self.dtype, self.tree.get(date))
else:
return self.tree.get(date)
def all(self, dates: DatetimeIndex, cast=True):
if cast:
return self.tree.all(dates).cast(self.dtype)
else:
return self.tree.all(dates)
def add(self, date, state):
if pd.isna(state) and self.empty:
return
if self.dtype is None:
self.dtype = infer_dtype([state])  # api: pandas.api.types.infer_dtype
# Copyright 2018 <NAME>, <NAME>.
# (Strongly inspired by original Google BERT code and Hugging Face's code)
""" Fine-tuning on A Classification Task with pretrained Transformer """
import itertools
import csv
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import tokenization
import models
import optim
import train
import pdb
import numpy as np
import pandas as pd
from utils import set_seeds, get_device, truncate_tokens_pair
import os
def read_explanations(path):
header = []
uid = None
df = pd.read_csv(path, sep='\t', dtype=str)  # api: pandas.read_csv
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 16:32:56 2019
@author: daniele
"""
#%% IMPORTS
from dataset import DatabaseManager, loadData, splitData
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import poisson, skellam
import statsmodels.api as sm
import statsmodels.formula.api as smf
from scipy.optimize import minimize
import time
result_path = './Results/'
#%%
def rho_correction(x, y, lambda_x, mu_y, rho):
if x==0 and y==0:
return 1- (lambda_x * mu_y * rho)
elif x==0 and y==1:
return 1 + (lambda_x * rho)
elif x==1 and y==0:
return 1 + (mu_y * rho)
elif x==1 and y==1:
return 1 - rho
else:
return 1.0
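# Worked example (illustrative, hypothetical parameter values): with lambda_x = 1.2,
# mu_y = 0.8 and rho = -0.1,
#   rho_correction(0, 0, 1.2, 0.8, -0.1) == 1 - (1.2 * 0.8 * -0.1) == 1.096
#   rho_correction(1, 1, 1.2, 0.8, -0.1) == 1 - (-0.1) == 1.1
# Scorelines other than 0-0, 0-1, 1-0 and 1-1 are left unadjusted (factor 1.0).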
def solve_parameters(dataset, debug = False, init_vals=None, options={'disp': True, 'maxiter':100},
constraints = [{'type':'eq', 'fun': lambda x: sum(x[:20])-20}] , **kwargs):
teams = np.sort(dataset['HomeTeam'].unique())
# check for no weirdness in dataset
away_teams = np.sort(dataset['AwayTeam'].unique())
if not np.array_equal(teams, away_teams):
raise ValueError("Something's not right")
n_teams = len(teams)
if init_vals is None:
# random initialisation of model parameters
init_vals = np.concatenate((np.random.uniform(0,1,(n_teams)), # attack strength
np.random.uniform(0,-1,(n_teams)), # defence strength
np.array([0, 1.0]) # rho (score correction), gamma (home advantage)
))
def dc_log_like(x, y, alpha_x, beta_x, alpha_y, beta_y, rho, gamma):
lambda_x, mu_y = np.exp(alpha_x + beta_y + gamma), np.exp(alpha_y + beta_x)
return (np.log(rho_correction(x, y, lambda_x, mu_y, rho)) +
np.log(poisson.pmf(x, lambda_x)) + np.log(poisson.pmf(y, mu_y)))
# @jit(float64(float64[:], int64), nopython=True, parallel=True)
def estimate_paramters(params):
score_coefs = dict(zip(teams, params[:n_teams]))
defend_coefs = dict(zip(teams, params[n_teams:(2*n_teams)]))
rho, gamma = params[-2:]
log_like = [dc_log_like(row.HomeGoals, row.AwayGoals, score_coefs[row.HomeTeam], defend_coefs[row.HomeTeam],
score_coefs[row.AwayTeam], defend_coefs[row.AwayTeam], rho, gamma) for row in dataset.itertuples()]
return -sum(log_like)
# @jit(float64[:](float64[:], int64), nopython=True, parallel=True)
# def fast_jac(x, N):
# h = 1e-9
# jac = np.zeros_like(x)
# f_0 = estimate_paramters(params)
# for i in range(N):
# x_d = np.copy(x)
# x_d[i] += h
# f_d = estimate_paramters(x_d, N)
# jac[i] = (f_d - f_0) / h
# return jac
opt_output = minimize(estimate_paramters, init_vals, method='SLSQP', options=options, constraints = constraints, **kwargs)
if debug:
# sort of hacky way to investigate the output of the optimisation process
return opt_output
else:
return dict(zip(["attack_"+team for team in teams] +
["defence_"+team for team in teams] +
['rho', 'home_adv'],
opt_output.x))
def calc_means(param_dict, homeTeam, awayTeam):
return [np.exp(param_dict['attack_'+homeTeam] + param_dict['defence_'+awayTeam] + param_dict['home_adv']),
np.exp(param_dict['defence_'+homeTeam] + param_dict['attack_'+awayTeam])]
def dixon_coles_simulate_match(params_dict, homeTeam, awayTeam, max_goals=8):
team_avgs = calc_means(params_dict, homeTeam, awayTeam)
team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals+1)] for team_avg in team_avgs]
output_matrix = np.outer(np.array(team_pred[0]), np.array(team_pred[1]))
correction_matrix = np.array([[rho_correction(home_goals, away_goals, team_avgs[0],
team_avgs[1], params_dict['rho']) for away_goals in range(2)]
for home_goals in range(2)])
output_matrix[:2,:2] = output_matrix[:2,:2] * correction_matrix
return output_matrix
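# Illustrative helper (not part of the original source): collapse the score matrix
# returned by dixon_coles_simulate_match into 1X2 match-outcome probabilities.
# Rows index home goals and columns index away goals, so the lower triangle is a
# home win, the diagonal a draw and the upper triangle an away win.
def outcome_probabilities(score_matrix):
    home_win = np.sum(np.tril(score_matrix, -1))
    draw = np.sum(np.diag(score_matrix))
    away_win = np.sum(np.triu(score_matrix, 1))
    return home_win, draw, away_win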
class SoccerPrediction():
def __init__(self, league_name, pathFile, dixon=False):
self.league_name = league_name
self.db = DatabaseManager(league_name, pathFile)
self.data = self.db.getUsefulData()
self.model = self.poissonModel()
self.teams = self.db.teams
self.dixon = dixon
if(dixon):
self.dixon_params = self.fit_dixon()
def fit_dixon(self):
start = time.time()
reduced_set = self.db.data.iloc[0:400]
# return reduced_set
params = solve_parameters(reduced_set)
end = time.time()
spent_time_min = (end - start)//60
spent_time_sec = end - start - spent_time_min*60
print('Time spent: {} min {} sec'.format(spent_time_min, spent_time_sec))
return params
def updateData(self, data):
self.data = data
def poissonModel(self):
data = self._genData(self.data)
data_home = data.rename(columns = {'HomeTeam':'team', 'AwayTeam':'opponent','HomeGoals':'goals'})
data_home = data_home.assign(home = 1)
data_away = data.rename(columns={'AwayTeam':'team', 'HomeTeam':'opponent','AwayGoals':'goals'})
data_away = data_away.assign(home = 0)
# return data_home, data_away
# goal_model_data = pd.concat([data_home, data_away])
goal_model_data = pd.concat([data[['HomeTeam','AwayTeam','HomeGoals']].assign(home=1).rename(
columns={'HomeTeam':'team', 'AwayTeam':'opponent','HomeGoals':'goals'}),
data[['AwayTeam','HomeTeam','AwayGoals']].assign(home=0).rename(
columns={'AwayTeam':'team', 'HomeTeam':'opponent','AwayGoals':'goals'})])
poisson_model = smf.glm(formula="goals ~ home + team + opponent", data=goal_model_data,
family=sm.families.Poisson()).fit()
# print(poisson_model.summary())
return poisson_model
def _genData(self, data):
if(data is None):
data = self.data
data = data.loc[:,['HomeTeam', 'AwayTeam',
'HomeGoals', 'AwayGoals']]
return data
def checkStatsLeague(self, data=None, plot=True):
data = self._genData(data)
# print(data.mean())
poisson_pred = np.column_stack([[poisson.pmf(x, data.mean()[j]) for x in range(8)] for j in range(2)])
# plot histogram of actual goals
[values, bins, _] = plt.hist(data[['HomeGoals', 'AwayGoals']].values, range(9),
alpha=0.7, label=['Home', 'Away'],normed=True, color=["#FFA07A", "#20B2AA"])
# add lines for the Poisson distributions
pois1, = plt.plot([i-0.5 for i in range(1,9)], poisson_pred[:,0],
linestyle='-', marker='o',label="Home", color = '#CD5C5C')
pois2, = plt.plot([i-0.5 for i in range(1,9)], poisson_pred[:,1],
linestyle='-', marker='o',label="Away", color = '#006400')
leg=plt.legend(loc='upper right', fontsize=13, ncol=2)
leg.set_title("Poisson Actual ", prop = {'size':'14', 'weight':'bold'})
plt.xticks([i-0.5 for i in range(1,9)],[i for i in range(9)])
plt.xlabel("Goals per Match",size=13)
plt.ylabel("Proportion of Matches",size=13)
plt.title("Number of Goals per Match",size=14,fontweight='bold')
plt.ylim([-0.004, 0.4])
plt.tight_layout()
plt.show()
home_error = np.mean(abs(poisson_pred[:,0] - values[0]))
away_error = np.mean(abs(poisson_pred[:,1] - values[1]))
return [home_error, away_error]
def checkDiffInGoals(self, data=None):
data = self._genData(data)
skellam_pred = [skellam.pmf(i, data.mean()[0], data.mean()[1]) for i in range(-6,8)]
plt.hist(data[['HomeGoals']].values - data[['AwayGoals']].values, range(-6,8),
alpha=0.7, label='Actual',normed=True)
plt.plot([i+0.5 for i in range(-6,8)], skellam_pred,
linestyle='-', marker='o',label="Skellam", color = '#CD5C5C')
plt.legend(loc='upper right', fontsize=13)
plt.xticks([i+0.5 for i in range(-6,8)],[i for i in range(-6,8)])
plt.xlabel("Home Goals - Away Goals",size=13)
plt.ylabel("Proportion of Matches",size=13)
plt.title("Difference in Goals Scored (Home Team vs Away Team)",size=14,fontweight='bold')
plt.ylim([-0.004, 0.26])
plt.tight_layout()
plt.show()
def checkStatsMatch(self, team1, team2, data):
fig,(ax1,ax2) = plt.subplots(2, 1, figsize=(7,5))
team1_home = data[data['HomeTeam']==team1][['HomeGoals']].apply(pd.value_counts,normalize=True)
team1_home_pois = [poisson.pmf(i,np.sum(np.multiply(team1_home.values.T,team1_home.index.T),axis=1)[0]) for i in range(8)]
team2_home = data[data['HomeTeam']==team2][['HomeGoals']].apply(pd.value_counts,normalize=True)
team2_home_pois = [poisson.pmf(i,np.sum(np.multiply(team2_home.values.T,team2_home.index.T),axis=1)[0]) for i in range(8)]
team1_away = data[data['AwayTeam']==team1][['AwayGoals']].apply(pd.value_counts,normalize=True)
team1_away_pois = [poisson.pmf(i,np.sum(np.multiply(team1_away.values.T,team1_away.index.T),axis=1)[0]) for i in range(8)]
team2_away = data[data['AwayTeam']==team2][['AwayGoals']].apply(pd.value_counts,normalize=True)
team2_away_pois = [poisson.pmf(i,np.sum(np.multiply(team2_away.values.T,team2_away.index.T),axis=1)[0]) for i in range(8)]
ax1.bar(team1_home.index-0.4, team1_home.values.reshape(team1_home.shape[0]), width=0.4, color="#034694", label=team1)
ax1.bar(team2_home.index,team2_home.values.reshape(team2_home.shape[0]),width=0.4,color="#EB172B",label=team2)
pois1, = ax1.plot([i for i in range(8)], team1_home_pois,
linestyle='-', marker='o',label=team1, color = "#0a7bff")
pois1, = ax1.plot([i for i in range(8)], team2_home_pois,
linestyle='-', marker='o',label=team2, color = "#ff7c89")
leg=ax1.legend(loc='upper right', fontsize=12, ncol=2)
leg.set_title("Poisson Actual ", prop = {'size':'14', 'weight':'bold'})
ax1.set_xlim([-0.5,7.5])
ax1.set_ylim([-0.01,0.65])
ax1.set_xticklabels([])
# mimicing the facet plots in ggplot2 with a bit of a hack
# ax1.text(7.65, 0, ' Home ', rotation=-90,
# bbox={'facecolor':'#ffbcf6', 'alpha':0.5, 'pad':5})
# ax2.text(7.65, 0, ' Away ', rotation=-90,
# bbox={'facecolor':'#ffbcf6', 'alpha':0.5, 'pad':5})
ax2.bar(team1_away.index-0.4,team1_away.values.reshape(team1_away.shape[0]),width=0.4,color="#034694",label=team1)
ax2.bar(team2_away.index,team2_away.values.reshape(team2_away.shape[0]),width=0.4,color="#EB172B",label=team2)
pois1, = ax2.plot([i for i in range(8)], team1_away_pois,
linestyle='-', marker='o',label=team1, color = "#0a7bff")
pois1, = ax2.plot([i for i in range(8)], team2_away_pois,
linestyle='-', marker='o',label=team2, color = "#ff7c89")
ax2.set_xlim([-0.5,7.5])
ax2.set_ylim([-0.01,0.65])
ax1.set_title("Number of Goals per Match {} vs {}".format(team1,team2),size=14,fontweight='bold')
ax2.set_title("Number of Goals per Match {} vs {}".format(team2,team1),size=14,fontweight='bold')
ax2.set_xlabel("Goals per Match",size=13)
#ax2.text(-1.15, 0.9, 'Proportion of Matches', rotation=90, size=13)
plt.tight_layout()
plt.show()
def drawProbLeague(self, data=None):
data = self._genData(data)
prob = self._probGoalsDiff(0, data)
return 'Draw: {:.1f} %'.format(prob)
def homeWinProbLeague(self, goal_di_scarto, data=None):
data = self._genData(data)
prob = self._probGoalsDiff(goal_di_scarto, data)
return 'Home team wins with {} goals more, with prob: {:.1f} %'.format(goal_di_scarto, prob)
def _probGoalsDiff(self, diff, data):
goals_diff = diff
return skellam.pmf(goals_diff, data.mean()[0], data.mean()[1])
def predAvgGoalTeam(self, team, opponent, home_factor):
match = {'team':team,
'opponent':opponent,
'home':home_factor}
match_df = pd.DataFrame(match, index=[1])
n_goals = self.model.predict(match_df)
print('\nResult\n')
print(team.upper() + ' probably will score {:.2f} goals'.format(n_goals.values[0]))
return n_goals.values[0]
def simulate_match(self, homeTeam, awayTeam, max_goals=8):
self.max_goals = 8
if(self.dixon == True):
match_outcome = dixon_coles_simulate_match(self.dixon_params, homeTeam, awayTeam)
result = {'Match':[str(homeTeam + ' vs ' + awayTeam)],
'Outcome': [match_outcome]}
match = pd.DataFrame(result)
return match
else:
home_data = {'team':homeTeam,
'opponent':awayTeam,
'home':1}
away_data = {'team':awayTeam,
'opponent':homeTeam,
'home':0}
home_df = pd.DataFrame(home_data, index=[1])
away_df = pd.DataFrame(away_data, index=[1])
home_goals_avg = self.model.predict(home_df).values[0]
away_goals_avg = self.model.predict(away_df).values[0]
team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals+1)] for team_avg in [home_goals_avg, away_goals_avg]]
match_outcome = np.outer(np.array(team_pred[0]), np.array(team_pred[1]))
result = {'Match':[str(homeTeam + ' vs ' + awayTeam)],
'Outcome': [match_outcome]}
match = pd.DataFrame(result)  # api: pandas.DataFrame
"""Collect model input data"""
import os
from dataclasses import dataclass
import pandas as pd
@dataclass
class ModelData:
# Directory containing core data files
data_dir: str = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, 'data')
# Directory containing dispatch and demand traces
traces_dir: str = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, '1_traces', 'output')
def __post_init__(self):
"""Append data objects to class object"""
# Name of NTNDP file
self.ntndp_filename = '2016 Planning Studies - Additional Modelling Data and Assumptions summary.xlsm'
# Minimum reserve levels for each NEM region
self.minimum_reserve_levels = self.get_minimum_reserve_levels()
# Storage unit properties
self.battery_properties = self.get_ntndp_battery_properties()
# Generator and storage data Data
self.generators = self.get_generator_data()
self.storage = self.get_storage_unit_data()
# NEM zones and regions
self.nem_zones = self.get_nem_zones()
self.nem_regions = self.get_nem_regions()
# DUIDs for different units
self.scheduled_duids = self.get_scheduled_unit_duids()
self.semi_scheduled_duids = self.get_semi_scheduled_unit_duids()
self.solar_duids = self.get_solar_unit_duids()
self.wind_duids = self.get_wind_unit_duids()
self.thermal_duids = self.get_thermal_unit_duids()
self.hydro_duids = self.get_hydro_unit_duids()
self.storage_duids = self.get_storage_unit_duids()
self.slow_start_duids = self.get_slow_start_thermal_generator_ids()
self.quick_start_duids = self.get_quick_start_thermal_generator_ids()
# Mapping between NEM regions and zones
self.nem_region_zone_map = self.get_nem_region_zone_map()
# Mapping between DUIDs and NEM zones
self.duid_zone_map = self.get_duid_zone_map()
# Network incidence matrix
self.network_incidence_matrix = self.get_network_incidence_matrix()
# Links between adjacent NEM regions
self.links = self.get_network_links()
# Links that have constrained flows
self.links_constrained = self.get_link_powerflow_limits().keys()
# Power flow limits for constrained links
self.powerflow_limits = self.get_link_powerflow_limits()
# Demand traces for each day
self.demand = pd.read_pickle(os.path.join(self.traces_dir, 'demand_42.pickle'))
# Dispatch traces for each day
self.dispatch = pd.read_pickle(os.path.join(self.traces_dir, 'dispatch_42.pickle'))
def get_minimum_reserve_levels(self):
"""Minimum reserve levels for each NEM region"""
# Minimum reserve levels for each NEM region
df = pd.read_excel(os.path.join(self.data_dir, 'files', self.ntndp_filename), sheet_name='MRL', skiprows=1)
# Rename columns and set index as NEM region
df_o = (df.rename(columns={'Region': 'NEM_REGION', 'Minimum Reserve Level (MW)': 'MINIMUM_RESERVE_LEVEL'})
.set_index('NEM_REGION'))
# Keep latest minimum reserve values (SA1 has multiple MRL values with different start dates)
df_o = df_o.loc[~df_o.index.duplicated(keep='last'), 'MINIMUM_RESERVE_LEVEL'].to_dict()
return df_o
def get_ntndp_battery_properties(self):
"""Load battery properties from NTNDP database"""
# Battery properties from NTNDP worksheet
df = (pd.read_excel(os.path.join(self.data_dir, 'files', self.ntndp_filename),
sheet_name='Battery Properties', skiprows=1)
.rename(columns={'Battery': 'STORAGE_ID'}).set_index('STORAGE_ID'))
return df
def get_generator_data(self):
"""Load generator data"""
# Path to generator data file
path = os.path.join(self.data_dir, 'files', 'egrimod-nem-dataset-v1.3', 'akxen-egrimod-nem-dataset-4806603',
'generators', 'generators.csv')
# Load generator data into DataFrame
df = pd.read_csv(path, index_col='DUID')
return df
def get_nem_zones(self):
"""Get tuple of unique NEM zones"""
# Load generator information
df = self.get_generator_data()
# Extract nem zones from existing generators dataset
zones = tuple(df.loc[:, 'NEM_ZONE'].unique())
# There should be 16 zones
assert len(zones) == 16, 'Unexpected number of NEM zones'
return zones
def get_nem_regions(self):
"""Get tuple of unique NEM regions"""
# Load generator information
df = self.get_generator_data()
# Extract nem regions from existing generators dataset
regions = tuple(df.loc[:, 'NEM_REGION'].unique())
# There should be 5 NEM regions
assert len(regions) == 5, 'Unexpected number of NEM regions'
return regions
def get_nem_region_zone_map(self):
"""Construct mapping between NEM regions and the zones belonging to those regions"""
# Load generator information
df = self.get_generator_data()
# Map between NEM regions and zones
region_zone_map = (df[['NEM_REGION', 'NEM_ZONE']] .drop_duplicates(subset=['NEM_REGION', 'NEM_ZONE'])
.groupby('NEM_REGION')['NEM_ZONE'].apply(lambda x: tuple(x))).to_dict()
return region_zone_map
def get_duid_zone_map(self):
"""Get mapping between DUIDs and NEM zones"""
# Load generator and storage information
df_g = self.get_generator_data()
df_s = self.get_storage_unit_data()
# Get mapping between DUIDs and zones for generators and storage units
generator_map = df_g.loc[:, 'NEM_ZONE'].to_dict()
storage_map = df_s.loc[:, 'NEM_ZONE'].to_dict()
# Combine dictionaries
zone_map = {**generator_map, **storage_map}
return zone_map
def get_thermal_unit_duids(self):
"""Get thermal unit DUIDs"""
# Load generator information
df = self.get_generator_data()
# Thermal DUIDs
thermal_duids = df.loc[df['FUEL_CAT'] == 'Fossil'].index
return thermal_duids
def get_solar_unit_duids(self):
"""Get solar unit DUIDs"""
# Load generator information
df = self.get_generator_data()
# Solar DUIDs
solar_duids = df.loc[df['FUEL_CAT'] == 'Solar'].index
return solar_duids
def get_wind_unit_duids(self):
"""Get wind unit DUIDs"""
# Load generator information
df = self.get_generator_data()
# Wind DUIDs
wind_duids = df.loc[df['FUEL_CAT'] == 'Wind'].index
return wind_duids
def get_hydro_unit_duids(self):
"""Get hydro unit DUIDs"""
# Load generator information
df = self.get_generator_data()
# Hydro DUIDs
hydro_duids = df.loc[df['FUEL_CAT'] == 'Hydro'].index
return hydro_duids
def get_scheduled_unit_duids(self):
"""Get all scheduled unit DUIDs"""
# Load generator information
df = self.get_generator_data()
# Thermal DUIDs
scheduled_duids = df.loc[df['SCHEDULE_TYPE'] == 'SCHEDULED'].index
return scheduled_duids
def get_semi_scheduled_unit_duids(self):
"""Get all scheduled unit DUIDs"""
# Load generator information
df = self.get_generator_data()
# Semi scheduled DUIDs
semi_scheduled_duids = df.loc[df['SCHEDULE_TYPE'] == 'SEMI-SCHEDULED'].index
return semi_scheduled_duids
def get_network_incidence_matrix(self):
"""Construct network incidence matrix"""
# All NEM zones:
zones = self.get_nem_zones()
# Links connecting different zones. First zone is 'from' zone second is 'to' zone
links = ['NQ-CQ', 'CQ-SEQ', 'CQ-SWQ', 'SWQ-SEQ', 'SEQ-NNS',
'SWQ-NNS', 'NNS-NCEN', 'NCEN-CAN', 'CAN-SWNSW',
'CAN-NVIC', 'SWNSW-NVIC', 'LV-MEL', 'NVIC-MEL',
'TAS-LV', 'MEL-CVIC', 'SWNSW-CVIC', 'CVIC-NSA',
'MEL-SESA', 'SESA-ADE', 'NSA-ADE']
# Initialise empty matrix with NEM zones as row and column labels
incidence_matrix = pd.DataFrame(index=links, columns=zones, data=0)
# Assign values to 'from' and 'to' zones. +1 is a 'from' zone, -1 is a 'to' zone
for link in links:
# Get from and to zones
from_zone, to_zone = link.split('-')
# Set from zone element to 1
incidence_matrix.loc[link, from_zone] = 1
# Set to zone element to -1
incidence_matrix.loc[link, to_zone] = -1
return incidence_matrix
def get_network_links(self):
"""Links connecting adjacent NEM zones"""
return self.get_network_incidence_matrix().index
@staticmethod
def get_link_powerflow_limits():
"""Max forward and reverse power flow over links between zones"""
# Limits for interconnectors composed of single branches
interconnector_limits = {'SEQ-NNS': {'forward': 210, 'reverse': 107}, # Terranora
'SWQ-NNS': {'forward': 1078, 'reverse': 600}, # QNI
'TAS-LV': {'forward': 594, 'reverse': 478}, # Basslink
'MEL-SESA': {'forward': 600, 'reverse': 500}, # Heywood
'CVIC-NSA': {'forward': 220, 'reverse': 200}, # Murraylink
}
return interconnector_limits
def get_slow_start_thermal_generator_ids(self):
"""
Get IDs for existing and candidate slow start units
A generator is classified as 'slow' if it cannot reach its
minimum dispatchable power output in one interval (e.g. 1 hour).
Note: A generator's classification of 'quick' or 'slow' depends on its
minimum dispatchable output level and ramp-rate. For candidate units
the minimum dispatchable output level is a function of the maximum
output level, and so is variable. As this level is not known ex ante,
all candidate thermal generators are assumed to operate the same way
as quick start units (i.e. they can reach their minimum dispatchable
output level in 1 trading interval (hour)).
"""
# Load generator information
df = self.get_generator_data()
# True if number of hours to ramp to min generator output > 1
mask_slow_start = df['MIN_GEN'].div(df['RR_STARTUP']).gt(1)
# Only consider coal and gas units
mask_technology = df['FUEL_CAT'].isin(['Fossil'])
# Get IDs for slow start generators
gen_ids = df.loc[mask_slow_start & mask_technology, :].index
return gen_ids
def get_quick_start_thermal_generator_ids(self):
"""
Get IDs for existing and candidate quick start units
Note: A generator is classified as 'quick' if it can reach its
minimum dispatchable power output in one interval (e.g. 1 hour).
"""
# Load generator information
df = self.get_generator_data()
# Slow start unit IDs - previously identified
slow_gen_ids = self.get_slow_start_thermal_generator_ids()
# Filter for slow generator IDs (existing units)
mask_slow_gen_ids = df.index.isin(slow_gen_ids)
# Only consider coal and gas units
mask_existing_technology = df['FUEL_CAT'].isin(['Fossil'])
# Get IDs for quick start generators
existing_quick_gen_ids = df.loc[~mask_slow_gen_ids & mask_existing_technology, :].index
return existing_quick_gen_ids
def get_storage_unit_data(self):
"""Get storage unit IDs"""
# Path to generator data file
path = os.path.join(self.data_dir, 'files', 'NEM Registration and Exemption List.xls')
# Load generators and scheduled load data into DataFrame
df = pd.read_excel(path, index_col='DUID', sheet_name='Generators and Scheduled Loads')  # api: pandas.read_excel
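# Illustrative usage sketch (not part of the original source; assumes the data/ and
# traces/ directories referenced above exist with the expected files):
#   model_data = ModelData()
#   model_data.nem_regions        # the 5 NEM region identifiers
#   model_data.powerflow_limits   # forward/reverse limits for constrained links
#   model_data.demand.head()      # demand traces loaded in __post_init__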
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 11:24:51 2020
@author: <NAME>
@project: Class that handles topic modeling
"""
from spacy.tokens import Doc
import numpy
from spacy.attrs import LOWER, POS, ENT_TYPE, IS_ALPHA
from neo4j import GraphDatabase
import pandas as pd
import os
#import subprocess
#import requests
#import unidecode
import re
import csv
from acessos import get_conn, read, persistir_banco, persistir_multiplas_linhas
import sys
import re, numpy as np, pandas as pd
from pprint import pprint
import spacy
# Gensim
import gensim
from gensim import corpora, models, similarities
import gensim, spacy, logging, warnings
import gensim.corpora as corpora
from gensim.utils import lemmatize, simple_preprocess
from gensim.models import CoherenceModel
import matplotlib.pyplot as plt
from gensim import corpora, models, similarities
# NLTK Stop words
from nltk.corpus import stopwords
from acessos import read, get_conn, persistir_uma_linha, persistir_multiplas_linhas, replace_df
from gensim.models.ldamulticore import LdaMulticore
import seaborn as sns
import matplotlib.colors as mcolors
#%matplotlib inline
warnings.filterwarnings("ignore",category=DeprecationWarning)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import os
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
from gensim.test.utils import datapath
class Topic_Modeling:
def __init__(self, language="pt-br", stop_words_list=[]):
self.language = language
self.stop_words = self._load_stop_words(stop_words_list)
self.nlp = self._load_spacy()
self.model_list =[]
self.coherence_values = []
self.lista_num_topics = []
self.melhor_modelo = None
def _load_spacy(self):
'''Private method that returns the spaCy model based on the language'''
#disable_list = ['parser', 'ner']
disable_list = []
if self.language == "pt-br":
nlp = spacy.load('pt_core_news_lg', disable=disable_list)
elif self.language == "us-en":
nlp = spacy.load("en_core_web_sm", disable=disable_list)
return nlp
def _load_stop_words(self, stop_words_list=[]):
'''Private method that returns the stop words based on the language'''
if self.language == "pt-br":
stop_words = stopwords.words('portuguese')
stop_words.extend(stop_words_list)
elif self.language == "us-en":
stop_words = stopwords.words('english') # to test
stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come'])
stop_words.extend(stop_words_list)
return stop_words
def filtrar_pos_tag(self, texto, allowed_postags=["NOUN", "PROPN", "VERB", "ADJ"]):
texto_saida = ""
doc = self.nlp(texto)
for token in doc:
if token.pos_ in allowed_postags:
texto_saida += " {}".format(token)
return texto_saida
def replace_ner_por_label(self, texto):
texto_out = texto
doc = self.nlp(texto)
for ent in reversed(doc.ents):
#label = " _" + ent.label_ + "_ "
label = ent.label_
comeco = ent.start_char
fim = comeco + len(ent.text)
texto_out = texto_out [:comeco] + label + texto_out[fim:]
return texto_out
def processamento_inicial(self, lista_documentos):
'''Remove emails, line breaks and single quotes'''
# Handle common abbreviations
lista_documentos = [re.sub('neh', 'né', sent) for sent in lista_documentos]
lista_documentos = [re.sub('td', 'tudo', sent) for sent in lista_documentos]
lista_documentos = [re.sub('tds', 'todos', sent) for sent in lista_documentos]
lista_documentos = [re.sub('vc', 'você', sent) for sent in lista_documentos]
lista_documentos = [re.sub('vcs', 'vocês', sent) for sent in lista_documentos]
lista_documentos = [re.sub('voce', 'você', sent) for sent in lista_documentos]
lista_documentos = [re.sub('tbm', 'também', sent) for sent in lista_documentos]
# Remove Emails
lista_documentos = [re.sub('\S*@\S*\s?', '', sent) for sent in lista_documentos]
# Remove new line characters
lista_documentos = [re.sub('\s+', ' ', sent) for sent in lista_documentos]
# Remove distracting single quotes
lista_documentos = [re.sub("\'", "", sent) for sent in lista_documentos]
return lista_documentos
def sent_to_words(self, sentences):
'''Tokenize a single document'''
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=False)) # deacc=True removes punctuations
def tokenizar(self, lista_documentos):
'''Tokenize a list of documents'''
lista_documentos_tokenizado = list(self.sent_to_words(lista_documentos))
return lista_documentos_tokenizado
def montar_n_grams(self, lista_documentos_tokenizado):
'''Build bigrams and trigrams from a list of tokenized documents.
Use this method after removing stop words.'''
bigram = gensim.models.Phrases(lista_documentos_tokenizado, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[lista_documentos_tokenizado], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# return the bigram and trigram lists
self.bigram = [bigram_mod[doc] for doc in lista_documentos_tokenizado]
self.trigram = [trigram_mod[bigram_mod[doc]] for doc in lista_documentos_tokenizado]
return self.bigram , self.trigram
def get_n_grams(self):
return self.bigram , self.trigram
def lematizar_documentos(self, lista_documentos_tokenizado):
"""https://spacy.io/api/annotation"""
documentos_out = []
for sent in lista_documentos_tokenizado:
doc = self.nlp(" ".join(sent))
lista_tokens_lematizados = []
for token in doc :
lista_tokens_lematizados.append(token.lemma_)
documentos_out.append(lista_tokens_lematizados)
return documentos_out
def remover_stop_words(self, lista_documentos_tokenizado):
return [[word for word in simple_preprocess(str(doc)) if word not in self.stop_words] for doc in lista_documentos_tokenizado]
def montar_novo_corpus(self, nova_lista_documentos_lematizada, id2word):
print(id2word)
corpus = [id2word.doc2bow(text) for text in nova_lista_documentos_lematizada]
return corpus
def pre_processar_texto_ou_lista(self, texto_ou_lista, filtro_ner=True, allowed_postags=["NOUN","PROPN", "VERB", "ADJ"]):
if isinstance(texto_ou_lista, str):
lista_documentos = [texto_ou_lista]
else:
lista_documentos = texto_ou_lista
lista_documentos = self.processamento_inicial(lista_documentos)
if filtro_ner==True:
lista_documentos = [self.replace_ner_por_label(texto) for texto in lista_documentos]
# if filtro_pos_tag==True:
# lista_documentos = [self.filtrar_pos_tag(texto) for texto in lista_documentos]
lista_documentos = [self.filtrar_pos_tag(texto, allowed_postags) for texto in lista_documentos]
lista_documentos_tokenizado = self.tokenizar(lista_documentos)
lista_documentos_tokenizado_stop_words = self.remover_stop_words(lista_documentos_tokenizado)
lista_documento_bi_gram, lista_documento_tri_gram = self.montar_n_grams(lista_documentos_tokenizado_stop_words)
lista_documento_lematizada = self.lematizar_documentos(lista_documento_tri_gram)
#lista_documento_lematizada = lista_documento_bi_gram
return lista_documento_lematizada
def gerar_modelo_hdp(self, corpus, id2word, texts):
model_hdp = models.HdpModel(corpus, id2word=id2word)
coherencemodel = CoherenceModel(model=model_hdp, texts=texts, dictionary=id2word, coherence='c_v')
self.melhor_modelo = model_hdp
return model_hdp, coherencemodel.get_coherence()
def gerar_multiplos_modelos(self, id2word, corpus, texts, limit, start=2, step=3):
print("Start: {}".format(start))
print("limit: {}".format(limit))
print("Step: {}".format(step))
self.start = start
self.limit = limit
self.step = step
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
print("Gerando novo modelo...")
# model = gensim.models.ldamodel.LdaModel(corpus=corpus,
# id2word=id2word,
# num_topics=num_topics,
# random_state=100,
# update_every=1,
# chunksize=100,
# passes=10,
# alpha='auto',
# per_word_topics=True)
model = LdaMulticore(corpus=corpus,
id2word=id2word,
random_state=100,
num_topics=num_topics,
workers=3)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
self.lista_num_topics.append(num_topics)
self.model_list = model_list
self.coherence_values = coherence_values
return model_list, coherence_values
def plotar_coerencia(self):
x = range(self.start, self.limit, self.step)
plt.plot(x, self.coherence_values)
plt.xlabel("Num de Tópicos")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
for m, cv in zip(x, self.coherence_values):
print("Num de Tópicos =", m, " valor coerência: ", round(cv, 4))
def classificar_novo_texto(self, texto, model,id2word):
lista_lematizada = self.pre_processar_texto_ou_lista(texto)
novo_corpus = self.montar_novo_corpus(lista_lematizada,id2word)
doc_bow = novo_corpus[0]
topicos = model[doc_bow]
#topicos_ordenados = sorted(topicos[0], key=lambda x: x[1], reverse=True)
topicos_ordenados = sorted(topicos, key=lambda x: x[1], reverse=True)
melhor_topico = topicos_ordenados[0]
#print(topicos_ordenados)
return melhor_topico, topicos_ordenados
def montar_id2word(self, lista_documento_lematizada):
id2word = corpora.Dictionary(lista_documento_lematizada)
return id2word
def montar_dict_models(self):
dict_models = {
"modelo": self.model_list,
"coerencia":self.coherence_values,
"num_topics": self.lista_num_topics
}
return dict_models
def salvar_modelos(self, diretorio, folder_name):
dict_models = self.montar_dict_models()
df_models = pd.DataFrame(dict_models)  # api: pandas.DataFrame
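# Illustrative end-to-end sketch (not part of the original source; the documents are
# hypothetical and the spaCy/NLTK resources loaded above must be installed):
#   tm = Topic_Modeling(language="pt-br")
#   docs = ["primeiro documento de exemplo", "segundo documento de exemplo"]
#   lemmas = tm.pre_processar_texto_ou_lista(docs)
#   id2word = tm.montar_id2word(lemmas)
#   corpus = tm.montar_novo_corpus(lemmas, id2word)
#   models, coherences = tm.gerar_multiplos_modelos(id2word, corpus, lemmas, limit=8, start=2, step=3)
#   tm.plotar_coerencia()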
import pandas as pd
import xarray as xr
import numpy as np
import sys
from config import *
from sklearn.ensemble import RandomForestRegressor
from joblib import dump, load
from sklearn.model_selection import PredefinedSplit,RandomizedSearchCV
def inputs():
msg = "You must specify whether to retrain the model (True) or just load from file (False)"
try:
retrain_model = sys.argv[1]
except:
sys.exit(msg)
if retrain_model not in ["True", "False"]:
sys.exit(msg)
return retrain_model
def get_numpy_arrays(df):
#Split the df into two numpy arrays, one for X features and one for Y outputs
y = 'LST_Day_CMG' #the output column
df_y = df[[y]]
df_x = df.drop([y], axis=1)
return df_x.to_numpy(), df_y.to_numpy().ravel()
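# Illustrative example (hypothetical toy frame): the output column 'LST_Day_CMG' is
# split off as y and every remaining column becomes a feature in x.
#   toy = pd.DataFrame({"LST_Day_CMG": [290.0, 300.0], "elevation": [10.0, 20.0]})
#   x, y = get_numpy_arrays(toy)   # x.shape == (2, 1), y.shape == (2,)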
def train(df):
print ('Inside train func')
sys.exit()
#Setup model
rf = RandomForestRegressor(n_estimators = 10, verbose=1)
#Train the model on training data
xtrain,ytrain = get_numpy_arrays(df)
rf.fit(xtrain,ytrain)
#save trained model to disk
dump(rf,data_root+'trained_model.joblib')
#Evaluate model training
training_score = rf.score(xtrain, ytrain)
return rf, training_score
def train_with_optimisation(df_train,df_validate):
"""Train and evaluate the model and save to disk"""
# Bring together train and validate sets
X = pd.concat([df_train, df_validate])
X_Train, Y_Train = get_numpy_arrays(X)
# Create a list where train data indices are -1 and validation data indices are 0
idx1 = [-1] * len(df_train)
idx2 = [0] * len(df_validate)
split_index = idx1 + idx2
pds = PredefinedSplit(test_fold = split_index)
#Setup random search hyperparam opt
random_search = {'n_estimators': list(np.linspace(10, 100, 10, dtype = int)),
'max_depth': list(np.linspace(10, 1200, 10, dtype = int)) + [None],
'max_features': ['auto', 'sqrt','log2', None],
'min_samples_leaf': [4, 6, 8, 12],
'min_samples_split': [5, 7, 10, 14],
}
clf = RandomForestRegressor()
model = RandomizedSearchCV(estimator = clf,
param_distributions = random_search,
n_iter = 2,
cv = pds,
verbose= 5,
random_state= 101,
n_jobs = 2) #njobs=-1 for all processors
model.fit(X_Train,Y_Train)
print('completed model train')
def predict(model,df):
"""Use the trained model to make predictions on the test df, then save to disk"""
#Get the test data as arrays
xtest,ytest = get_numpy_arrays(df)
#Evaluate how good our model predictions are
testing_score = model.score(xtest, ytest)
#...also get the actual predicions themselves
ypred = model.predict(xtest) #Make some predictions on the test data using the trained model
#...and the error in these predictions
relative_error = (ypred - ytest)/ytest
#Create a df copy
dfIO=df.copy()
#Append error and predicitons to test df
dfIO['predictions'] = ypred
dfIO['relative_error'] = relative_error
#Add the testing score as an attribute
dfIO.attrs['testing_score'] = testing_score
#IO
dfIO.to_pickle(data_root+"predictions.pkl")
print ('Max/min relative error:', max(abs(relative_error)), min(abs(relative_error)))
return testing_score
print ('Starting ML')
retrain_model = inputs()
#Load the data
df = pd.read_pickle(clean_data)  # api: pandas.read_pickle
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from tests.fixtures import DataTestCase
import mock
from tsfresh.feature_extraction import MinimalFCParameters
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
class RelevantFeatureAugmenterTestCase(DataTestCase):
def setUp(self):
self.test_df = self.create_test_data_sample()
fc_parameters = {"length": None}
self.kind_to_fc_parameters = {"a": fc_parameters.copy(),
"b": fc_parameters.copy()}
def test_not_fitted(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
self.assertRaises(RuntimeError, augmenter.transform, X)
def test_no_timeseries(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()  # api: pandas.DataFrame
import os
import gzip
import warnings
import pandas as pd
warnings.simplefilter("ignore")
import pickle
def outlier_analysis(df, model_dir):
_df = df[df["is_rescurable_homopolymer"]].reset_index(drop=True)
if not len(_df):
return df
__df = df[~df["is_rescurable_homopolymer"]].reset_index(drop=True)
at_ins_df = _df[_df["is_at_ins"] == 1].reset_index(drop=True)
at_ins_df = find_outliers(at_ins_df, "at_ins", model_dir)
at_del_df = _df[_df["is_at_del"] == 1].reset_index(drop=True)
at_del_df = find_outliers(at_del_df, "at_del", model_dir)
gc_ins_df = _df[_df["is_gc_ins"] == 1].reset_index(drop=True)
gc_ins_df = find_outliers(gc_ins_df, "gc_ins", model_dir)
gc_del_df = _df[_df["is_gc_del"] == 1].reset_index(drop=True)
gc_del_df = find_outliers(gc_del_df, "gc_del", model_dir)
return pd.concat([__df, at_ins_df, at_del_df, gc_ins_df, gc_del_df], axis=0)  # api: pandas.concat
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import geopandas as gpd
from energy_demand.read_write import data_loader, read_data
from energy_demand.basic import date_prop
from energy_demand.basic import basic_functions
from energy_demand.basic import lookup_tables
from energy_demand.technologies import tech_related
from energy_demand.plotting import basic_plot_functions
from energy_demand.plotting import result_mapping
from energy_demand.plotting import fig_p2_weather_val
def total_demand_national_scenarios(
scenario_result_paths,
sim_yrs,
fueltype_str,
path_out_plots
):
dict_scenarios_weather_yrs = {}
columns = [
'weather_yr',
'national_peak_demand']
fueltype_int = tech_related.get_fueltype_int(fueltype_str)
# ----------------
# Read all data inform of scenario, simulationyr, weather_yrs
# ----------------
for scenario_path in scenario_result_paths:
scenario_name = os.path.split(scenario_path)[-1]
dict_scenarios_weather_yrs[scenario_name] = {}
weather_yrs = []
# Get all folders with weather_yr run results (name of folder is scenario)
weather_yr_scenarios_paths = os.listdir(scenario_path)
for simulation_run in weather_yr_scenarios_paths:
if simulation_run != '_results_PDF_figs':
weather_yr_scenarios_paths = os.listdir(os.path.join(scenario_path, simulation_run))
for weather_yr_scenario_path in weather_yr_scenarios_paths:
try:
split_path_name = weather_yr_scenario_path.split("__")
weather_yr = int(split_path_name[0])
path_to_weather_yr = os.path.join(scenario_path, simulation_run, "{}__{}".format(weather_yr, 'all_stations'))
weather_yrs.append((weather_yr, path_to_weather_yr))
except:
pass
for simulation_yr in sim_yrs:
dict_scenarios_weather_yrs[scenario_name][simulation_yr] = pd.DataFrame(columns=columns)
for weather_yr, path_to_weather_yr in weather_yrs:
seasons = date_prop.get_season(year_to_model=2015)
model_yeardays_daytype, _, _ = date_prop.get_yeardays_daytype(year_to_model=2015)
results_container = read_data.read_in_results(
os.path.join(path_to_weather_yr, 'model_run_results_txt'),
seasons,
model_yeardays_daytype)
# ---------------------------------------------------
# Calculate hour with national peak demand
# This may be different depending on the weather yr
# ---------------------------------------------------
ele_regions_8760 = results_container['ed_fueltype_regs_yh'][simulation_yr][fueltype_int]
sum_all_regs_fueltype_8760 = np.sum(ele_regions_8760, axis=0) # Sum for every hour
max_day = int(basic_functions.round_down((np.argmax(sum_all_regs_fueltype_8760) / 24), 1))
max_h = np.argmax(sum_all_regs_fueltype_8760)
max_demand = np.max(sum_all_regs_fueltype_8760)
# Calculate the national peak demand in GW
national_peak_GW = np.max(sum_all_regs_fueltype_8760)
# -----------------------
# Add to final container
# -----------------------
line_entry = [[
weather_yr,
national_peak_GW
]]
line_df = pd.DataFrame(line_entry, columns=columns)
existing_df = dict_scenarios_weather_yrs[scenario_name][simulation_yr]
appended_df = existing_df.append(line_df)
dict_scenarios_weather_yrs[scenario_name][simulation_yr] = appended_df
# ------------------------------------------------------------------------------------------
# Create plot
# ------------------------------------------------------------------------------------------
print("....create plot")
weather_yr_to_plot = 1979 #TODO
color_list = ['red', 'green', 'orange', '#37AB65', '#C0E4FF', '#3DF735', '#AD6D70', '#EC2504', '#8C0B90', '#27B502', '#7C60A8', '#CF95D7', '#F6CC1D']
# Calculate quantiles
quantile_95 = 0.95
quantile_05 = 0.05
# Create dataframes with scenarios as columns and one row per simulation year
scenarios = list(dict_scenarios_weather_yrs.keys())
# Containers
df_total_demand_2015 = pd.DataFrame(columns=scenarios)
df_q_95_scenarios = pd.DataFrame(columns=scenarios)
df_q_05_scenarios = pd.DataFrame(columns=scenarios)
for simulation_yr in sim_yrs:
line_entries_95 = []
line_entries_05 = []
line_entries_tot_h = []
for scenario_name in scenarios:
print("-- {} {}".format(scenario_name, simulation_yr))
# Calculate entries over the weather years
df_weather_yrs = dict_scenarios_weather_yrs[scenario_name][simulation_yr]
df_q_95 = df_weather_yrs['national_peak_demand'].quantile(quantile_95)
df_q_05 = df_weather_yrs['national_peak_demand'].quantile(quantile_05)
peak_weather_yr_2015 = df_weather_yrs[df_weather_yrs['weather_yr']==weather_yr_to_plot]['national_peak_demand'].values[0]
line_entries_95.append(df_q_95)
line_entries_05.append(df_q_05)
line_entries_tot_h.append(peak_weather_yr_2015)
# Try to smooth lines
try:
sim_yrs_smoothed, line_entries_tot_h_smoothed = basic_plot_functions.smooth_data(sim_yrs, line_entries_tot_h, num=40000)
except:
sim_yrs_smoothed = sim_yrs
line_entries_tot_h_smoothed = line_entries_tot_h
df_q_95_scenarios = df_q_95_scenarios.append(pd.DataFrame([line_entries_95], columns=scenarios))
df_q_05_scenarios = df_q_05_scenarios.append( | pd.DataFrame([line_entries_05], columns=scenarios) | pandas.DataFrame |
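# The scenario containers above grow rows with DataFrame.append, which newer pandas
# releases deprecate (and later remove) in favour of pd.concat. A minimal equivalent
# sketch with hypothetical values, not the original scenario data:
import pandas as pd
_existing = pd.DataFrame(columns=["weather_yr", "national_peak_demand"])
_line = pd.DataFrame([[1979, 52.3]], columns=["weather_yr", "national_peak_demand"])
# pd.concat([_existing, _line]) replaces _existing.append(_line);
# ignore_index=True renumbers the resulting rows.
_appended = pd.concat([_existing, _line], ignore_index=True)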
import pandas as pd
import os
import cv2
import numpy as np
import random
def get_diff_scaled(img1,img2,scale):
return ((np.clip((img1.astype('int32') - img2.astype('int32')) * scale, -255, 255) + np.ones_like(
img1) * 255) / 2).astype('uint8')
def get_diff(img1,img2):
return ((img1.astype('int32')-img2.astype('int32')+np.ones_like(img1)*255)/2).astype('uint8')
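# Usage sketch (a reading of the two helpers above, not part of the original file):
# both map a signed pixel difference into the displayable uint8 range by centring it
# around mid-grey (identical inputs give roughly 127 everywhere).
import numpy as np
_a = np.full((4, 4), 120, dtype='uint8')
_b = np.full((4, 4), 100, dtype='uint8')
_diff = get_diff(_a, _b)                              # values near 137 (mid-grey + 10)
_diff_amplified = get_diff_scaled(_a, _b, scale=4)    # difference amplified 4x before centring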
def run_patch_matching_3(pm_mode=0,args=None):
# select rep mode
if pm_mode == 0:
representations = ['invrepnet', 'org']
elif pm_mode == 1:
representations = ['invrepnet_gray', 'org_gray']
elif pm_mode == 2:
representations = ['additional_rep']
else:
exit("error")
# obtain image names
images = next(os.walk(args.task_invrep_dirs))[-1]
scenes_list = list(set([im.split('_im')[0] for im in images]))
scenes_list.sort()
num_scenes = float(len(scenes_list))
test_image_dir = [args.task_image_dirs, args.task_invrep_dirs]
#set iters (number of patches) in each image
iters=100
print("Iters = {}".format(iters))
#set seed
random.seed(2019)
#set patch sizes
patch_sizes = np.array([32, 64, 128])
col_names = ['32', '64', '128']
#choose matching method
methods = ['cv2.TM_CCORR_NORMED']
method_final=[]
loglist = []
# patch matching main loop
for m, meth in enumerate(methods):
to_csv = np.copy(patch_sizes)
for reptype in representations:
if 'org' in reptype or 'additional' in reptype:
rt =0
elif 'invrepnet' in reptype:
rt = 1
else:
exit('error')
method_mean_iou_0 = np.zeros(len(patch_sizes))
for sl, scene_name in enumerate(scenes_list):
#read images
if args.out_channels == 3 or args.out_channels == 1:
im0_rep = cv2.imread(os.path.join(test_image_dir[rt], scene_name + '_im0.png'))
im1_rep = cv2.imread(os.path.join(test_image_dir[rt], scene_name + '_im1.png'))
else:
im0_rep=np.load(os.path.join(test_image_dir[rt], scene_name + '_im0.png.npy'))
im1_rep=np.load(os.path.join(test_image_dir[rt], scene_name + '_im1.png.npy'))
#convert to gray if gray mode
if 'gray' in reptype:
if 'invrepnet' in reptype:
if 'mean' in reptype:
im0_rep = im0_rep.mean(-1).astype('uint8')
im1_rep = im1_rep.mean(-1).astype('uint8')
else:
im0_rep = cv2.cvtColor(im0_rep, cv2.COLOR_BGR2GRAY)
im1_rep = cv2.cvtColor(im1_rep, cv2.COLOR_BGR2GRAY)
else:
im0_rep = cv2.cvtColor(im0_rep, cv2.COLOR_BGR2GRAY)
im1_rep = cv2.cvtColor(im1_rep, cv2.COLOR_BGR2GRAY)
scene_mean_iou_0 = np.zeros(len(patch_sizes))
#loop over patch sizes
for jj, ps in enumerate(patch_sizes):
np.random.seed(2019)
max_x = im1_rep.shape[1] - ps
max_y = im1_rep.shape[0] - ps
x = np.random.randint(0, max_x, size=iters)
y = np.random.randint(0, max_y, size=iters)
patch_size_iou_sum = 0
#random patches iter loop
for ii in range(iters):
template, cr_location = get_random_crop(im1_rep, ps, ps, [x[ii], y[ii]])
# Apply template Matching
if 'gray' in reptype:
res = cv2.matchTemplate(im0_rep, template, eval(meth))
else:
res=cv2.matchTemplate(im0_rep[:,:,0], template[:,:,0], eval(meth))
for ch in range(1, args.out_channels):
res= res * cv2.matchTemplate(im0_rep[:,:,ch], template[:,:,ch], eval(meth))
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if eval(meth) in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + ps, top_left[1] + ps)
box_detected = [top_left[0], top_left[1], bottom_right[0], bottom_right[1]]
box_gt = [cr_location[0], cr_location[1], cr_location[0] + ps, cr_location[1] + ps]
#calculate accuracy IOU
patch_size_iou_sum = patch_size_iou_sum + bb_intersection_over_union(box_detected, box_gt)
scene_mean_iou_0[jj] = patch_size_iou_sum / float(iters)
printlog(loglist,'{}: method_0, scene: {}, patch_size= {}, mean_iou= {:4}'.format(reptype, scene_name, ps, scene_mean_iou_0[jj]))
method_mean_iou_0 += scene_mean_iou_0 / num_scenes
printlog(loglist,'final mean_iou for match method: {}'.format(meth))
for kk, ps in enumerate(patch_sizes):
printlog(loglist,'{}: method_0: patch_size={}, mean_iou={}'.format(reptype, ps, method_mean_iou_0[kk]))
printlog(loglist,'\n*****************************************************************************************************\n')
to_csv = np.vstack([to_csv, method_mean_iou_0])
if reptype == 'invrepnet' or reptype == 'invrepnet_multi_all':
method_final.append(method_mean_iou_0)
#saving results to CSV
I = pd.Index(representations, name="rows")
C = | pd.Index(col_names, name="columns") | pandas.Index |
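# run_patch_matching_3 above relies on a bb_intersection_over_union helper that is
# not shown in this excerpt. A plausible sketch, assuming boxes are given as
# [x1, y1, x2, y2] (the format implied by box_detected/box_gt):
def bb_intersection_over_union_sketch(box_a, box_b):
    # intersection rectangle
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    # IoU = intersection / union; guard against degenerate zero-area boxes
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0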
import itertools
import logging
import json
import networkit as nk
import pandas as pd
from src.generators.graphs.SBM import SBM
from src.generators.graphs.ErdosRenyi import ErdosRenyi
from src.generators.graphs.BarabasiAlbert import BarabasiAlbert
from src.measures.FairHarmonicCentrality import FairGroupHarmonicCentrality,GroupHarmonicCentrality
from pathlib import Path
class Harmonic:
'''
instance= {
type = "Syntetic or Real"
graph = [{ "name": ,
"parameters":{"n": ,
"k":,
"structure": ,
"threshold"; ,
} }],
experiments = {"mod": rnd\pagerank\maxHitting\classic\maxDeg\sampleInEachCommunity\maxDegreeInEachCommunity\maxHCInEachCommunity,
"Ssize": [ ] ,
"nRun": ,
}
}
'''
def __init__(self,instance):
self.instance = instance
self.graphs = []
self.communities = []
self.names = []
self.results = []
self.node_community_mapping = []
self.parameters = []
def run(self):
if(self.instance['type'] in ['synthetic','Synthetic']):
self.runSynteticExperiments()
if(self.instance['type'] in ['real','Real']):
self.runRealExperiments()
if(self.instance['experiments']['mod'] in ['rnd','random','rd']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph' : self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
for size in fairSetSizes:
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
FGH.set_k(size)
FGH.sampleS(trials)
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
#print("Group that maximizes GHC ",FGH.get_S())
#print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = trials
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
#result['experiments'].append(FGH)
'''
logging.debug("Max Group Harmonic Centrality: %r"%FGH.get_GHC_max_group())
if(PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r"%PoF)'''
result['experiments'].append(res)
communityIndex += 1
results.append(result)
elif(self.instance['experiments']['mod'] in ['pr','PageRank','pagerank']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
for size in fairSetSizes:
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
FGH.set_k(size)
FGH.samplePageRankS(trials)
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = trials
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
'''logging.debug("Max Group Harmonic Centrality: %r"%FGH.get_GHC_max_group())
if(PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r"%PoF)'''
result['experiments'].append(res)
communityIndex += 1
results.append(result)
elif (self.instance['experiments']['mod'] in ['sampleInEachCommunity', 'siec']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
FGH.sampleInEachCommunity()
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
# print("Group that maximizes GHC ",FGH.get_S())
# print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = -1
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex ]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
'''logging.debug("Group Harmonic Centrality: %r" % FGH.get_GH())
if (PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r" % PoF)'''
result['experiments'].append(res)
communityIndex+=1
results.append(result)
elif (self.instance['experiments']['mod'] in ['maxHitting', 'MH','mh']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
for size in fairSetSizes:
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
FGH.set_k(size)
FGH.maxHitting()
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
# print("Group that maximizes GHC ",FGH.get_S())
# print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = -1
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
'''logging.debug("Group Harmonic Centrality: %r" % FGH.get_GH())
if (PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r" % PoF)'''
result['experiments'].append(res)
communityIndex+=1
results.append(result)
elif (self.instance['experiments']['mod'] in ['Classic','classic', 'CL', 'cl']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
for size in fairSetSizes:
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
FGH.set_S(None)
FGH.set_k(size)
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
# print("Group that maximizes GHC ",FGH.get_S())
# print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = -1
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
'''logging.debug("Group Harmonic Centrality: %r" % FGH.get_GH())
if (PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r" % PoF)'''
result['experiments'].append(res)
communityIndex+=1
results.append(result)
elif (self.instance['experiments']['mod'] in ['maxDeg', 'md', 'MD']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
for size in fairSetSizes:
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
FGH.set_k(size)
FGH.maxDegS()
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
# print("Group that maximizes GHC ",FGH.get_S())
# print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = -1
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
'''logging.debug("Group Harmonic Centrality: %r" % FGH.get_GH())
if (PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r" % PoF)'''
result['experiments'].append(res)
communityIndex+=1
results.append(result)
elif (self.instance['experiments']['mod'] in ['maxDegInEachCommunity', 'mdiec', 'MDIEC']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
#FGH.set_k(size)
FGH.maxDegreeInEachCommunity()
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
# print("Group that maximizes GHC ",FGH.get_S())
# print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = -1
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
'''logging.debug("Group Harmonic Centrality: %r" % FGH.get_GH())
if (PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r" % PoF)'''
result['experiments'].append(res)
communityIndex+=1
results.append(result)
elif (self.instance['experiments']['mod'] in ['maxHCInEachCommunity', 'mhciec', 'MHCIEC']):
fairSetSizes = self.instance['experiments']['sSize']
trials = self.instance['experiments']['nRun']
results = []
communityIndex = 0
for graph in self.graphs:
result = {'graph': self.names[communityIndex],
'parameters': self.parameters[communityIndex],
'experiment_mod': self.instance['experiments']['mod'],
'experiments': []}
FGH = FairGroupHarmonicCentrality(graph, self.communities[communityIndex], None)
# FGH.set_k(size)
FGH.maxHCInEachCommunity()
FGH.computeGroupsCentralities()
FGH.computeFairGroupHarmonicCentrality(FGH.get_S())
# print("Group that maximizes GHC ",FGH.get_S())
# print("GH ",FGH.get_GHC_max_group())
PoF = FGH.get_price_of_fairness()
res = {}
res['type_of_exp'] = self.instance['experiments']['mod']
res['sampling_trials'] = -1
res['fair_set_size'] = len(FGH.get_S())
res['fair_set'] = FGH.get_S()
res['group_harmonic'] = FGH.get_GHC_max_group()
res['PoF'] = FGH.get_price_of_fairness()
res['fair_harmonic_centrality'] = FGH.get_FGHC()
res['communities_dimension'] = FGH.get_communities_size()
res['node_community_mapping'] = self.node_community_mapping[communityIndex ]
res['execution_time'] = FGH.get_overall_time()
res['fair_group_centrality_time'] = FGH.get_exec_time()
res['fair_group_centrality_community_time'] = FGH.get_time_per_comm()
logging.debug("Group Harmonic Centrality: %r" % FGH.get_GH())
if (PoF == -1):
logging.debug("PoF: Undefined")
else:
logging.debug("PoF: %r" % PoF)
result['experiments'].append(res)
communityIndex += 1
results.append(result)
self.results.extend(results)
def runRealExperiments(self):
logging.info("Loading Communities")
communities = []
with open(self.instance['inputPathCommunities'], 'r') as f:
data = f.read()
for line in data.split("\n"):
community = []
for elem in line.split("\t"):
community.append(int(elem))
communities.append(community)
logging.info("Loading Communities: Completed")
logging.info("Loading Graph")
self.graphs.append(nk.graphio.EdgeListReader('\t', 0, '#').read(self.instance['inputPathGraph']))
#self.graphs.append(nk.graphio.SNAPGraphReader().read(self.instance['inputPathGraph']))
logging.info("Loading Graph: Completed")
edgeListName = self.instance['inputPathGraph'].split('/')[-1]
self.names.append(edgeListName)
self.communities.append(communities)
self.parameters.append(" ")
# Method that loads the datasets and runs the experiments
def runSynteticExperiments(self):
for elem in self.instance['graphs']:
# listOfParameters = []
#
# for parameter in self.instance['graphs'][elem]:
#
# listOfParameters.append(parameter)
#
# parametersCombination =list(itertools.product(*listOfParameters))
# paraKeys = list(self.instance['graphs'][elem].keys())
# Loading the graphs
if(elem['name'] in ['Erdos-Renyi','ER','er','<NAME>','ErdosRenyi']):
inputPath = "datasets/synthetic/erdos_renyi/"
edgeListName = "Erdos-Renyi.ungraph.txt"
communitiesName = "Erdos-Renyi.all.cmty.txt"
elif(elem['name'] in ['Barabasi-Albert','BA','ba','BarabasiAlbert','Barabasi Albert']):
inputPath = "datasets/synthetic/barabasi_albert/"
edgeListName = "Barabasi-Albert.ungraph.txt"
communitiesName = "Barabasi-Albert.all.cmty.txt"
elif(elem['name'] in ['SBM','sbm','Stochastic-Block-Model']):
inputPath = "datasets/synthetic/sbm/"
edgeListName = "Stochastic-Block-Model.ungraph.txt"
communitiesName = "Stochastic-Block-Model.all.cmty.txt"
parameterKeys = list(elem['parameters'].keys())
name = ""
j = 0
for e,v in elem['parameters'].items():
name += str(e) + str(v)
if(j<len(elem['parameters'])):
name+="/"
j+=1
self.parameters.append(elem['parameters'])
#print(elem['parameters'])
# for para in parametersCombination:
# name = ""
# j = 0
# for e in para:
# name += "_" + str(paraKeys[j]) + "_" + str(e)
# j += 1
inputPathGraph = inputPath + name + edgeListName
inputPathCommunities = inputPath + name + communitiesName
communities = []
index = 0
node_community_mapping = {}
with open(inputPathCommunities, 'r') as f:
data = f.read()
for line in data.split("\n"):
community = []
for elem in line.split("\t"):
community.append(int(elem))
node_community_mapping[int(elem)] = index
communities.append(community)
index +=1
self.graphs.append(nk.graphio.EdgeListReader('\t', 0, '#').read(inputPathGraph))
#self.graphs.append(nk.graphio.SNAPGraphReader().read(inputPathGraph))
self.names.append(edgeListName)
self.communities.append(communities)
self.node_community_mapping.append(node_community_mapping)
def get_graphs(self):
return (self.graphs)
def get_communities(self):
return(self.communities)
def get_node_community_mapping(self):
return (self.node_community_mapping)
def save_results_to_json(self,path = "./"):
if Path(path).is_file():
with open(path, 'a+', encoding='utf-8') as f:
fileHandle = open(path, "r")
lineList = fileHandle.readlines()
fileHandle.close()
if(lineList[-1] != '['):
f.write(',')
with open(path, 'a+', encoding='utf-8') as f:
json.dump(self.results, f, ensure_ascii=False, indent=4)
def define_list_of_jsons(self,path):
if not Path(path).is_file():
with open(path, 'a+', encoding='utf-8') as f:
f.write('[')
def close_list_of_jsons(self,path):
with open(path, 'a+', encoding='utf-8') as f:
f.write(']')
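# Intended usage of the three JSON helpers above (a reading of the code, not a
# documented contract): define_list_of_jsons(path) writes the opening '[',
# save_results_to_json(path) appends each run's results (inserting a ',' between
# entries), and close_list_of_jsons(path) writes the closing ']' so the file
# parses as a single JSON array.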
def save_results_to_csv(self,path = "./"):
lista_to_csv = []
for elem in self.results:
for exp in elem['experiments']:
lista_to_csv.append({**elem['parameters'],**exp})
for dic in self.results:
df = | pd.DataFrame(lista_to_csv) | pandas.DataFrame |
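# save_results_to_csv builds its frame from a list of merged dicts. A minimal sketch
# of that pd.DataFrame pattern (hypothetical keys, not the real experiment schema):
import pandas as pd
_params = {"n": 100, "k": 4}
_exp = {"fair_set_size": 2, "PoF": 1.3}
_rows = [{**_params, **_exp}]
# each dict becomes one row; keys missing from some rows are filled with NaN
_df = pd.DataFrame(_rows)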
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Purpose: Perform automated testing on pdvalidate.
:Platform: Linux/Windows | Python 3.5
:Developer: <NAME>
:Email: <EMAIL>
"""
# pylint: disable=protected-access
# pylint: disable=wrong-import-position
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import datetime
import numpy as np
import pytest
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pdvalidate.validation import ei, \
validate as pdv, \
ValidationWarning
class TestReturnTypes():
strings = pd.Series(['1', '1', 'ab\n', 'a b', 'Ab', 'AB', np.nan])
masks = [pd.Series([False, False, False, True, True, False, False]),
pd.Series([True, True, False, True, True, False, True])]
def test_return_mask_series(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'mask_series'),
pd.Series([True, True, False, True, True, False, True]))
def test_return_mask_frame(self):
assert_frame_equal(pdv._get_return_object(self.masks, self.strings, 'mask_frame'),
pd.concat(self.masks, axis='columns'))
def test_return_values(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'values'),
pd.Series([np.nan, np.nan, 'ab\n', np.nan, np.nan, 'AB', np.nan]))
def test_wrong_return_type(self):
with pytest.raises(ValueError):
pdv._get_return_object(self.masks, self.strings, 'wrong return type')
class TestMaskNonconvertible():
mixed = pd.Series([1, 2.3, np.nan, 'abc', | pd.datetime(2014, 1, 7) | pandas.datetime |
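# The completion above uses the pd.datetime alias, which newer pandas releases
# deprecate and later remove; the standard-library datetime is the usual
# replacement. Minimal sketch:
import datetime
import numpy as np
import pandas as pd
_mixed = pd.Series([1, 2.3, np.nan, 'abc', datetime.datetime(2014, 1, 7)])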
import pandas as pd
from pandas.tseries.offsets import DateOffset
FOUR_MINUTE_OFFSET = DateOffset(minutes=4)
HOUR_MINUTE_OFFSET = | DateOffset(hours=1) | pandas.tseries.offsets.DateOffset |
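# Short usage sketch for the offsets defined above: adding a DateOffset to a
# Timestamp shifts it by calendar-aware amounts.
import pandas as pd
from pandas.tseries.offsets import DateOffset
_ts = pd.Timestamp("2021-01-01 12:00")
_plus_four_minutes = _ts + DateOffset(minutes=4)   # 2021-01-01 12:04:00
_plus_one_hour = _ts + DateOffset(hours=1)         # 2021-01-01 13:00:00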
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data))
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from argparse import ArgumentParser
from os import path
from time import time
from utils import trj2blocks
# MDAnalysis
import MDAnalysis as mda
from MDAnalysis.analysis.hydrogenbonds import hbond_analysis
def parse():
'''Parse command line arguments.
Returns:
Namespace object containing input arguments.
'''
parser = ArgumentParser(description='MDTools: Hydrogen bond analysis')
parser.add_argument('-i', '--input', required=True, type=str,
help='Input .xyz file')
parser.add_argument('-n', '--n_cpu', required=True, type=int,
help='Number of CPUs for parallel processing')
parser.add_argument('-c', '--cell_vectors', required=True, type=float,
help='Lattice vectors in angstroms (a, b, c)', nargs=3)
return parser.parse_args()
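# Hedged usage note (not part of the original script): parse() is expected to be driven
# from the command line roughly as below; the script name, trajectory file and cell
# vectors are placeholder assumptions for illustration only.
#
#   python hbonds.py -i water_slab.xyz -n 8 -c 12.42 12.42 40.0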
def hbonds(u, block):
'''Computes hydrogen bond (HB) statistics.
Args:
u: MDAnalysis Universe object containing trajectory.
block: Range of frames composing block.
Returns:
Accepted and donated hydrogen bond counts and surface separations
'''
# Initialize hydrogen bond analysis
hbonds = hbond_analysis.HydrogenBondAnalysis(
u, d_h_a_angle_cutoff=135, d_a_cutoff=3.5)
hbonds.donors_sel = 'name O'
hbonds.acceptors_sel = 'name O'
hbonds.hydrogens_sel = 'name H'
# Run hydrogen bond analysis
hbonds.run(start=block.start, stop=block.stop, verbose=True)
out = hbonds.results.hbonds
# Select oxygen atoms, initialize output arrays
oxygen = u.select_atoms('name O')
acc_counts = np.zeros((len(block), oxygen.n_atoms))
don_counts = np.zeros((len(block), oxygen.n_atoms))
heights = np.zeros((len(block), oxygen.n_atoms))
for i, ts in enumerate(u.trajectory[block.start:block.stop]):
print('Processing blocks %.1f%%' % (100*i/len(block)), end='\r')
# Get all HBs of current frame
step = out[(out[:, 0] == ts.frame)]
# Loop over each oxygen
for j, idx in enumerate(oxygen.indices):
# Get number of accepted and donated HBs + position along z
don_counts[i, j] = len(step[(step[:, 1] == idx)])
acc_counts[i, j] = len(step[(step[:, 3] == idx)])
heights[i, j] = oxygen[j].position[2]
return np.stack((heights, acc_counts, don_counts))
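# Hedged usage sketch (assumption, not in the original source): shows how a single block
# could be analyzed outside of main(). The trajectory file name, cell dimensions and the
# frame range are placeholders; the stacked return value unpacks along its first axis.
def _example_single_block(xyz_file='water_slab.xyz'):
    u = mda.Universe(xyz_file, dt=5e-4)  # assumed input trajectory
    u.add_TopologyAttr('charges')
    u.dimensions = np.array([12.42, 12.42, 40.0, 90, 90, 90])  # assumed cell
    heights, acc_counts, don_counts = hbonds(u, range(0, 10))
    # mean accepted/donated hydrogen bonds per oxygen atom over the block
    return acc_counts.mean(axis=0), don_counts.mean(axis=0)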
def main():
args = parse()
input = args.input
n_jobs = args.n_cpu
a, b, c = args.cell_vectors
CURRENT_PATH = path.dirname(path.realpath(__file__))
DATA_PATH = path.normpath(path.join(CURRENT_PATH, path.dirname(input)))
base = path.splitext(path.basename(input))[0]
# Initialize universe (time step 0.5 fs)
u = mda.Universe(input, dt=5e-4)
u.add_TopologyAttr('charges')
u.dimensions = np.array([a, b, c, 90, 90, 90])
# Split trajectory into blocks
blocks = trj2blocks.get_blocks(u, n_jobs)
print('Analyzing...')
results = Parallel(n_jobs=n_jobs)(delayed(hbonds)(
u, block) for block in blocks)
# Concatenate results
results = np.concatenate(results, axis=1)
# Save results (heights, accepted HBs, donated HBs) as .csv
df1 = pd.DataFrame(results[0])
df2 = pd.DataFrame(results[1])
# Copyright (c) Facebook, Inc. and its affiliates.
from factor_learning.utils import utils
from factor_learning.dataio.DigitImageTfDataset import DigitImageTfDataset
from factor_learning.dataio.DigitImageTfPairsDataset import DigitImageTfPairsDataset
from subprocess import call
import os
from scipy import linalg
import numpy as np
import cv2
from PIL import Image
import math
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
import torch
import seaborn as sns
from pandas.plotting import scatter_matrix
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.patches import Rectangle, Circle
plt.rcParams.update({'font.size': 14})
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
def visualize_correlation(feat_ij, pose_ij):
data_tensor = torch.cat([feat_ij, pose_ij], 1)
data = data_tensor.data.numpy()
data_frame = pd.DataFrame(data)
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
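# Minimal inspection sketch (assumed, not from the original module): the combined
# summaries frame returned above is expected to carry one row per member, with the
# 'id', 'member_name' and 'membership_level' columns used by the populate_* helpers.
def _example_inspect_members():
    summaries = load_member_summaries()
    print(summaries.shape)
    return summaries[["id", "member_name", "membership_level"]].head()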
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0)
commerces = commerces.drop_duplicates("commerce_name")
i = 0
for _, row in commerces.iterrows():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
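# Hedged driver sketch (assumption): the populate_* helpers are designed to share one
# ArangoDB handle from connect_to_mim_database() (imported via graph.arango_utils), so a
# full rebuild of the node collections could look like this.
def _example_populate_all():
    db = connect_to_mim_database()
    populate_sectors(db=db)
    populate_commerces(db=db)
    populate_members(db=db)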
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_company",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
members = load_member_summaries()
members = members[cols_of_interest]
members = members.drop_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~pd.isnull(members["tenancies"])]
members["about_company"] = members["about_company"].map(remove_html_tags, na_action="ignore")
members = members.sort_values("member_name")
i = 0
for _, row in members.iterrows():
member_name = row["member_name"]
if pd.isnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not pd.isnull(row[k]) and k in {"sectors", "buys", "sells"}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"UK_sectors",
"UK_divisions",
"UK_groups",
"UK_classes",
"sic_codes",
"directors",
}
else cast_to_float(row[k]) if k in {"Cash_figure","NetWorth_figure","TotalCurrentAssets_figure","TotalCurrentLiabilities_figure"}
else row[k] if not pd.isnull(row[k])
else None)
for k in cols_of_interest
}
}
insert_document(db, collection, document)
i += 1
"""
Author : <NAME>\n
email : <EMAIL>\n
LICENSE : MIT License
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import seaborn as sns
import time
from scipy.signal import butter, sosfiltfilt, sosfreqz
from scipy.signal import spectrogram as spect
from scipy.stats import gaussian_kde
import datetime
from threading import Thread
from msdlib import msdExceptions
import os
sns.set()
pd.plotting.register_matplotlib_converters()
# this is a custom designed progress bar for checking the loop timing. The user should follow this approach to make it work
#with ProgressBar(arr, desc = 'intro of arr', perc = 5) as pbar:
# for i in arr:
# 'your code/task inside the loop'
# pbar.inc()
class ProgressBar():
"""
Inputs:
:arr: iterable, it is the array you will use to run the loop, it can be range(10) or any numpy array or python list or any other iterator
:desc: str, description of the loop, default - 'progress'
:barlen: int, length of the progress bar, default is 40
:front space: int, allowed space for description, default is 20
:tblink_max: float/int indicates maximum interval in seconds between two adjacent blinks, default is .4
:tblink_min: float/int indicates minimum interval in seconds between two adjacent blinks, default is .18
Outputs:
there is no output. It creates a progress bar which shows the loop progress.
"""
def __init__(self, arr, desc='progress', barlen=40, front_space = 20, tblink_max = .3, tblink_min = .18):
self.xlen = len(arr)
self.barlen = barlen
if tblink_max >= 1:
tblink_max = .9
print("'tblink_max' was set to .9 seconds because of exceeding maximum limit!")
self.desc = desc[:front_space] + ' ' + '-' * (front_space - len(desc)) * int(front_space > len(desc))+ ' '
self.barend = ' '*15
self.tblmax = tblink_max
self.tblmin = tblink_min
self.blintv = self.tblmax # blinking interval
self.barincs = [int(self.xlen / self.barlen * (i + 1)) for i in range(self.barlen)]
self.barinc = 0
self.sym = '█' # complete symbol in progress bar
self.non = ' ' # gap symbol in progress bar
self.blsyms = ['|', '/', '-', '\\']
self.bllen = len(self.blsyms)
self.blcnt = 0
self.cnt = 0 # iterative counter for x elements
self.barelap = 0 # iterative counter for progress bar
self.set_barelap() # setting proper next value for self.barinc
self.blink = self.blsyms[0]
self.tcntst = False
self.min_tprint = .1 # minimum time interval for two consecutive bar print
self.tprrec = time.time() - self.min_tprint - 1 # initialized with a bigger time
def __enter__(self, ):
self.tst = time.time()
# multithreading initialization
self.thread = Thread(target = self.blink_func)
self.flblink = True
self.thread.start()
# bar initialization
self.prleftime = 'calculating..'
self.tstack = 0
self.tstackst = time.time()
self.pastime = datetime.timedelta(seconds = 0)
return self
def calc_time(self):
self.pastime = datetime.timedelta(seconds = time.time() - self.tst)
self.leftime = self.pastime * (self.xlen / self.cnt - 1)
self.tstackst = time.time()
self.tstack = 0
self.blintv = self.tblmax - (self.tblmax - self.tblmin) * (self.barelap + 1) / self.barlen
def conv_time(self):
d = self.pastime.days
s = int(self.pastime.seconds + self.tstack)
self.prpastime = '%s'%datetime.timedelta(days = d, seconds = s)
if self.tcntst:
d = self.leftime.days
s = int(self.leftime.seconds - self.tstack)
if d < 0:
d, s = 0, 0
self.prleftime = '%s'%datetime.timedelta(days = d, seconds = s)
def set_barelap(self):
if self.cnt == self.barincs[self.barinc]:
if self.barinc < self.barlen - 1:
self.barinc += 1
while self.barincs[self.barinc] == self.barincs[self.barinc - 1] and self.barinc < self.barlen:
self.barinc += 1
self.barelap = int(self.cnt / self.xlen * self.barlen)
def inc(self):
self.cnt += 1
if not self.tcntst: self.tcntst = True
self.set_barelap()
self.calc_time()
self.conv_time()
self.barprint()
def barprint(self, end = ''):
if time.time() - self.tprrec >= self.min_tprint or not self.flblink:
self.bar = self.sym * self.barelap + self.blink * int(self.flblink) + self.non * (self.barlen - self.barelap - int(self.flblink))
self.pr = self.desc + '[' + self.bar + '] ' + '%d/%d <%3d%%>'%(self.cnt, self.xlen, self.cnt / self.xlen * 100) + ' ( %s'%self.prpastime + ' < %s'%self.prleftime + ' )%s'%(self.barend)
print('\r%s'%self.pr, end = end, flush = False)
self.tprrec = time.time()
def blink_func(self):
while self.flblink:
time.sleep(self.blintv)
self.blcnt += 1
if self.blcnt == self.bllen: self.blcnt = 0
self.blink = self.blsyms[self.blcnt]
# time adjustment part
self.tstack = time.time() - self.tstackst
self.conv_time()
self.barprint()
def __exit__(self, exception_type, exception_value, traceback):
self.flblink = False
time.sleep(self.tblmax)
self.barend = ' Complete!' + ' '*15
self.barprint('\n')
def get_time_estimation(time_st, count_ratio=None, current_ep=None, current_batch=None, total_ep=None, total_batch=None, string_out=True):
"""
This function estimates the remaining time inside any loop. The function is prepared for estimating
remaining time in machine-learning training with mini-batches,
but it can also be used for other purposes by providing the count_ratio input value.
Inputs:
:time_st: time.time() instance indicating the starting time count
:count_ratio: float, ratio of elapsed time at any moment.\n
Must be 0 ~ 1, where 1 will indicate that this is the last iteration of the loop
:current_ep: current epoch count
:current_batch: current batch count in mini-batch training
:total_ep: total epoch in the training model
:total_batch: total batch in the mini-batch training
:string_out: bool, whether to output the elapsed and estimated time in string format or not.
True will output time string
Outputs:
:output time: the output can be a single string in the format\n
'elapsed hour : elapsed minute : elapsed second < remaining hour : remaining minute : remaining second '\n
if string_out flag is True\n
or it can output 6 integer values in the above order for those 6 elements in the string format\n
"""
if count_ratio is None:
# getting count ratio
total_count = total_ep * total_batch
current_count = current_ep * total_batch + current_batch + 1
count_ratio = current_count / total_count
# calculating time
t = time.time()
elapsed_t = t - time_st
# converting time into H:M:S
# elapsed time calculation
el_h = elapsed_t // 3600
el_m = (elapsed_t - el_h * 3600) // 60
el_s = int(elapsed_t - el_h * 3600 - el_m * 60)
if count_ratio == 0:
if string_out:
return '%d:%02d:%02d < %d:%02d:%02d' % (el_h, el_m, el_s, 0, 0, 0)
else:
return el_h, el_m, el_s, 0, 0, 0
# remaining time calculation
total_t = elapsed_t / count_ratio
rem_t = total_t - elapsed_t
rem_h = rem_t // 3600
rem_m = (rem_t - rem_h * 3600) // 60
rem_s = int(rem_t - rem_h * 3600 - rem_m * 60)
if string_out:
out = '%d:%02d:%02d < %d:%02d:%02d' % (el_h, el_m, el_s, rem_h, rem_m, rem_s)
return out
else:
return el_h, el_m, el_s, rem_h, rem_m, rem_s
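# Illustrative loop (assumption, mirroring the docstring above): how the elapsed/remaining
# time string could be printed during mini-batch training; the sleep call stands in for a
# real training step.
def _example_time_estimation(total_ep=2, total_batch=50):
    t0 = time.time()
    for ep in range(total_ep):
        for b in range(total_batch):
            time.sleep(.01)  # stand-in for one training step
            msg = get_time_estimation(t0, current_ep=ep, current_batch=b,
                                      total_ep=total_ep, total_batch=total_batch)
            print(msg, end='\r')
    print()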
class Filters():
"""
This class is used to apply Butterworth (IIR) filters as high-pass, low-pass, band-pass and band-stop filters.
It also produces a proper graphical representation of the filter response, the signal before and after filtering, and the signal spectrum.
The core purpose of this class is to make the filtering process super easy and to check the filter response comfortably.
Inputs:
:T: float, indicating the sampling period of the signal, must be in seconds (doesnt have any default values)
:n: int, indicating number of fft frequency bins, default is 1000
:savepath: str, path to the directory to store the plot, default is None (doesnt save plots)
:show: bool, whether to show the plot or not, default is True (Shows figures)
:save: bool, whether to store the plot or not, default is False (doesnt save figures)
"""
def __init__(self, T, N = 1000, savepath=None, save=False, show=True):
# T must be in seconds
self.fs = 1 / T
self.N = N
self.savepath = savepath
self.show = show
self.save = save
def raise_cutoff_error(self, msg):
raise msdExceptions.CutoffError(msg)
def raise_filter_error(self, msg):
raise msdExceptions.FilterTypeError(msg)
def vis_spectrum(self, sr, f_lim=[], see_neg=False, show=None, save=None, savepath=None, figsize=(30, 3)):
"""
The purpose of this function is to produce the frequency spectrum of the time series signal 'sr'.
Inputs:
:sr: numpy ndarray or pandas Series, indicating the time series signal you want to check the spectrum for
:f_lim: python list of len 2, indicating the limits of visualizing frequency spectrum. Default is []
:see_neg: bool, flag indicating whether to check negative side of the spectrum or not. Default is False
:show: bool, whether to show the plot or not, default is None (follows Filters.show attribute)
:save: bool, whether to store the plot or not, default is None (follows Filters.save attribute)
:savepath: str, path to the directory to store the plot, default is None (follows Filters.savepath attribute)
:figsize: tuple, size of the figure plotted to show the fft version of the signal. Default is (30, 3)
Outputs:
doesnt return anything as the purpose is to generate plots
"""
if savepath is None: savepath = self.savepath
if save is None: save = self.save
if show is None: show = self.show
if isinstance(sr, np.ndarray) or isinstance(sr, list): sr = pd.Series(sr)
if sr.name is None: sr.name = 'signal'
if see_neg:
y = np.fft.fft(sr.dropna().values, n = self.N).real
y = np.fft.fftshift(y)
f = (np.arange(self.N) / self.N - .5) * self.fs
y = pd.Series(y, index = f)
else:
y = np.fft.fft(sr.dropna().values, n = self.N * 2).real
f = np.arange(2 * self.N) / (2 * self.N) * self.fs
y = pd.Series(y, index = f)
y = y.iloc[:self.N]
fig, ax = plt.subplots(figsize = figsize)
ax.plot(y.index, y.values, alpha = 1)
fig_title = 'Frequency Spectrum of %s'%sr.name
ax.set_title(fig_title)
ax.set_ylabel('Power')
ax.set_xlabel('Frequency (Hz)')
if len(f_lim) == 2: ax.set_xlim(f_lim[0], f_lim[1])
fig.tight_layout()
if show:
plt.show()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
fig.savefig('%s/%s.jpg'%(savepath, fig_title.replace(' ', '_')), bbox_inches='tight')
plt.close()
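# Hedged end-to-end example (assumed sampling period and cut-off): with a signal sampled
# at 100 Hz, the typical workflow of this class would be
#
#   filt = Filters(T=.01)
#   filt.vis_spectrum(sr, f_lim=[0, 50])
#   y = filt.apply(sr, 'low_pass', f_cut=5, order=10, response=True, plot=True)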
def apply(self, sr, filt_type, f_cut, order=10, response=False, plot=False, f_lim=[], savepath=None, show=None, save=None):
"""
The purpose of this function is to apply a Butterworth (IIR) filter to a time series signal 'sr' and get the filtered output signal y
Inputs:
:sr: numpy ndarray/pandas Series/list, indicating the time series signal you want to apply the filter on
:filt_type: str, indicating the type of filter you want to apply on sr.\n
{'lp', 'low_pass', 'low pass', 'lowpass'} for applying low pass filter\n
and similar for 'high pass', 'band pass' and 'band stop' filters
:f_cut: float/list/numpy 1d array, indicating the cut-off frequencies. Please follow the explanations below\n
for lowpass/highpass filters, it must be int/float\n
for bandpass/bandstop filters, it must be a list or numpy array of length divisible by 2
:order: int, filter order; the higher the order, the sharper the edges at the expense of more complex computation. Default is 10.
:response: bool, whether to check the frequency response of the filter or not, Default is False
:plot: bool, whether to see the spectrum and time series plot of the filtered signal or not, Default is False
:f_lim: list of length 2, frequency limit for the plot. Default is []
:savepath: str, path to the directory to store the plot, default is None (follows Filters.savepath attribute)
:show: bool, whether to show the plot or not, default is None (follows Filters.show attribute)
:save: bool, whether to store the plot or not, default is None (follows Filters.save attribute)
Outputs:
:y: pandas Series, filtered output signal
"""
if savepath is None: savepath = self.savepath
if save is None: save = self.save
if show is None: show = self.show
if isinstance(sr, np.ndarray) or isinstance(sr, list): sr = pd.Series(sr)
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# on PY2 this raises only with older numpy (< 1.10), where the mixed
# types cannot be sorted
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
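# Illustration (mirrors the assertions above, pandas-0.17-era API): for an ordered
# Categorical, comparisons follow the declared category order, not lexical order.
# cat_rev = pd.Categorical(list("abc"), categories=list("cba"), ordered=True)
# cat_rev > "b"    # -> array([ True, False, False]): 'a' outranks 'b' under c < b < a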
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care about
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
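# Illustration (mirrors the internals checked above): set_categories() remaps the
# integer codes so the stored values are preserved while the category order (and
# hence min/max for ordered data) changes.
# c = pd.Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
# c._codes                        # array([0, 1, 2, 3, 0])
# c = c.set_categories([4, 3, 2, 1])
# c._codes                        # array([3, 2, 1, 0, 3]) -- same values, remapped codes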
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
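# Illustration (pandas-0.17-era behavior, as exercised above): missing values are
# not categories; they are stored as the sentinel code -1, and putting NaN into the
# categories themselves is the deprecated path.
# c = pd.Categorical(["a", "b", np.nan, "a"])
# c.categories                    # Index(['a', 'b'], dtype='object') -- NaN excluded
# c._codes                        # array([ 0,  1, -1,  0]) -- NaN encoded as -1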
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
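# Illustration (mirrors the assertions above): min()/max() on an ordered Categorical
# follow the category order, so reversing the categories swaps the extremes, and
# numeric_only=True skips NaN entries.
# cat = pd.Categorical(["a", "b", "c", "d"],
#                      categories=["d", "c", "b", "a"], ordered=True)
# cat.min(), cat.max()            # ('d', 'a') under the declared order d < c < b < a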
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
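# Illustration (mirrors the assertions above): nbytes counts the codes buffer plus
# the categories buffer, while memory_usage(deep=True) also inspects object
# categories, which is why it exceeds nbytes for string categories.
# cat = pd.Categorical(['foo', 'foo', 'bar'])
# cat.nbytes                      # codes bytes + categories.values bytes
# cat.memory_usage(deep=True)     # larger: adds the string payloads of the categories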
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
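# Illustration (mirrors the assertions above): the dtype of .codes widens as the
# number of categories grows, and shrinks again when categories are removed.
# Categorical(['foo', 'bar', 'baz']).codes.dtype                   # int8   (3 categories)
# Categorical(['foo%05d' % i for i in range(400)]).codes.dtype     # int16  (400 categories)
# Categorical(['foo%05d' % i for i in range(40000)]).codes.dtype   # int32  (40000 categories)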
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
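# Illustration (mirrors the two halves of the test above, for the pandas version
# these tests target): Series(cat) shares the categorical's data by default, so
# in-place edits propagate, whereas Series(cat, copy=True) is independent.
# cat = Categorical(["a", "b", "c", "a"])
# s_shared = pd.Series(cat)             # s_shared.values is cat -> True
# s_copied = pd.Series(cat, copy=True)  # mutating s_copied leaves cat untouched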
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories() directly on the Series (instead of via .cat) is
# an easy mistake, so test that it raises an error on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
import pandas as pd
import numpy as np
import os
from numba import types
from numba.typed import Dict
from numba import njit
from openbatlib import model
from openbatlib import view
class Error(Exception):
pass
class InputError(Error):
def __init__(self, expression):
self.expression = expression
class Controller(object):
"""Class to manage the models and view components
"""
_version = '0.1'
def __init__(self):
"""Constructor method
"""
self.view = view.View()
# get path to working directory
self.cwd = os.getcwd()
def sim(self, fparameter=None, freference=None, system=None, ref_case=None, dt=1, spi=False):
"""Method for managing the simulation
:param fparameter: File path to the system parameters
:type fparameter: string
:param freference: File path to the reference case data
:type freference: string
:param system: Identifier for the system under simulation in the file
:type system: string
:param ref_case: Identifier to choose one of the two reference cases
:type ref_case: string
:param dt: time step width in seconds
:type dt: integer
"""
if fparameter is None:
# set path to the system parameter file
fparameter = os.path.join(self.cwd, 'parameter/PerModPAR.xlsx')
if freference is None:
# set path to the reference case file
freference = os.path.join(self.cwd, 'reference_case/ref_case_data.npz')
try:
# Load system parameters
parameter = self._load_parameter(fparameter, system)
if not parameter['ref_1'] and ref_case == '1':
raise InputError('System not suitable with selected reference case 1!')
if not parameter['ref_2'] and ref_case == '2':
raise InputError('System not suitable with selected reference case 2!')
except InputError as err:
raise
# Load PV generator input
ppv = self._load_pv_input(freference, 'ppv')
# Load data from reference cases (load and inverter parameters)
parameter, pl = self._load_ref_case(parameter, freference, fparameter, ref_case)
# Call model for AC coupled systems
if parameter['Top'] == 'AC':
d = model.transform_dict_to_array(parameter)
self.model = model.BatModAC(parameter, d, ppv, pl, dt)
self.model.simulation()
self.model.bat_mod_res()
self.model.calculate_spi()
# Call model for DC coupled systems
elif parameter['Top'] == 'DC':
d = model.transform_dict_to_array(parameter)
self.model = model.BatModDC(parameter, d, ppv, pl, dt)
self.model.simulation()
self.model.bat_mod_res()
self.model.calculate_spi()
# Call model for PV-coupled systems
elif parameter['Top'] == 'PV':
Pac, Ppv, Ppvs, Pperi = model.max_self_consumption(parameter, ppv, pl, pvmod=True)
d = model.transform_dict_to_array(parameter)
self.model = model.BatModPV(parameter, d, ppv, pl, Pac, Ppv, Pperi, dt)
# Load the view class
self.view = view.View()
def modbus(self, host, port, unit_id, data_frame, ref_case, dt, fname, fparameter, fref, system):
"""Function to establish a connection to a battery system via ModBus protocol
:param host: IP-Address of the host
:type host: string
:param port: Port of the host
:type port: integer
:param unit_id: Unit-ID of the host
:type unit_id: integer
:param data_frame: Data Frame holding the values
:type data_frame: pandas data frame
:param ref_case: Identifier for one of the two reference cases
:type ref_case: string
:param dt: Time step width in seconds
:type dt: integer
:param fname: File path to the system under simulation
:type fname: string
:param fparameter: File path to the system parameters
:type fparameter: string
:param fref: File path to the reference cases
:type fref: string
:param system: Identifier for the system under simulation
:type system: string
"""
parameter = self._load_parameter(fparameter, system)
#df_resample = model.resample_data_frame(df=data_frame)
ppv = data_frame['ppv'].to_numpy()
pl = data_frame['L'].to_numpy()
parameter, pl_not_used = self._load_ref_case(parameter, fref, fparameter, ref_case)
Pr, Ppv_not_used, Ppvs_not_used, Pperi_not_used = model.max_self_consumption(parameter, ppv, pl, pvmod=True)
Pr = Pr * -1 # negative values for charging, positive values for discharging
self.model = model.ModBus(host, port, unit_id, Pr, dt, fname)
def real_time(self, parameter, **kwargs):
"""Function for direct access to the battery models
:param parameter: PV battery system parameters
:type parameter: dict
:return r: Dictionary of the simulation results
:rtype r: dict
"""
r = dict() # Dictionary storing the results
if parameter['Top'] == 'AC':
d = self._dict_to_array(parameter)
r['Pbat'], r['Pbs'], r['soc'], r['soc0'], r['Pbs0'] = model.BatMod_AC(d, **kwargs)
return r
elif parameter['Top'] == 'DC':
d = self._dict_to_array(parameter)
(r['Ppv2ac_out'], r['Ppv2bat_in'], r['Ppv2bat_in0'], r['Pbat2ac_out'], r['Pbat2ac_out0'],
r['Ppvbs'], r['Pbat'], r['soc'], r['soc0']) = model.BatMod_DC(d, **kwargs)
return r
elif parameter['Top'] == 'PV':
d = self._dict_to_array(parameter)
(r['_soc'], r['_soc0'], r['_Ppv'], r['_Ppvbs'], r['_Pbat'], r['_Ppv2ac_out'], r['_Pbat2pv_out'],
r['_Ppv2bat_in']) = model.BatMod_PV(d, **kwargs)
return r
def _load_parameter(self, fparameter, system):
"""Loads system parameter
:param fparameter: Path to file
:type fparameter: string
:param system: Indicator for the system
:type system: string
"""
parameter = model.load_parameter(fparameter, system)
parameter = model.eta2abc(parameter)
return parameter
def get_residual_power_AC(self, parameter, ppv, pl):
Pr, Ppv, Ppvs, Pperi = model.max_self_consumption(parameter, ppv, pl, pvmod=True)
return Pr
def _dict_to_array(self, parameter):
d = model.transform_dict_to_array(parameter)
return d
def get_parameter(self, fparameter, system):
return self._load_parameter(fparameter, system)
def _load_pv_input(self, fname, name):
"""Loads PV input data
:param fname: Path to file
:type fref: string
:param name: Name of the input series
:type name: string
"""
ppv = model.load_ref_case(fname, name)
return ppv
def _load_set_values(self, fname):
return fname
def _load_ref_case(self, parameter, fname, fparameter, ref_case):
if ref_case == '1':
# Load parameters of first inverter
if parameter['Top'] == 'AC' or parameter['Top'] == 'PV':
inverter_parameter = model.load_parameter(fparameter, 'L')
parameter['P_PV'] = 5.0
pl = model.load_ref_case(fname, 'pl1')
elif ref_case == '2':
# Load parameters of second inverter
if parameter['Top'] == 'AC' or parameter['Top'] == 'PV':
inverter_parameter = model.load_parameter(fparameter, 'M')
parameter['P_PV'] = 10
pl = model.load_ref_case(fname, 'pl2')
# Load inverter parameters for AC or PV coupled systems
if parameter['Top'] == 'AC' or parameter['Top'] == 'PV':
inverter_parameter = model.eta2abc(inverter_parameter)
parameter['P_PV2AC_in'] = inverter_parameter['P_PV2AC_in']
parameter['P_PV2AC_out'] = inverter_parameter['P_PV2AC_out']
parameter['P_PVINV_AC'] = inverter_parameter['P_PVINV_AC']
parameter['PV2AC_a_in'] = inverter_parameter['PV2AC_a_in']
parameter['PV2AC_b_in'] = inverter_parameter['PV2AC_b_in']
parameter['PV2AC_c_in'] = inverter_parameter['PV2AC_c_in']
parameter['PV2AC_a_out'] = inverter_parameter['PV2AC_a_out']
parameter['PV2AC_b_out'] = inverter_parameter['PV2AC_b_out']
parameter['PV2AC_c_out'] = inverter_parameter['PV2AC_c_out']
if parameter['Top'] == 'PV':
parameter['P_SYS_SOC0_AC'] = inverter_parameter['P_PVINV_AC']
return parameter, pl
def print_E(self):
E_real, E_ideal = self.model.get_E()
E_real_df = pd.DataFrame.from_dict(E_real, orient='index', columns=['real / MWh'])
E_ideal_df = pd.DataFrame.from_dict(E_ideal, orient='index', columns=['ideal / MWh'])
"""Step 1: Solving the problem in a deterministic manner."""
import cvxpy as cp
import fledge
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import shutil
def main():
# Settings.
scenario_name = 'course_project_step_1'
results_path = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'results', 'step_1')
run_primal = True
run_dual = True
run_kkt = True
# Clear / instantiate results directory.
try:
if os.path.isdir(results_path):
shutil.rmtree(results_path)
os.mkdir(results_path)
except PermissionError:
pass
# STEP 1.0: SETUP MODELS.
# Read scenario definition into FLEDGE.
# - Data directory from this repository is first added as additional data path.
fledge.config.config['paths']['additional_data'].append(
os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'data')
)
fledge.data_interface.recreate_database()
# Obtain data & models.
# Flexible loads.
der_model_set = fledge.der_models.DERModelSet(scenario_name)
# Thermal grid.
thermal_grid_model = fledge.thermal_grid_models.ThermalGridModel(scenario_name)
thermal_grid_model.cooling_plant_efficiency = 10.0 # Change model parameter to incentivize use of thermal grid.
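# Interpreted as a coefficient of performance (COP): with a value of 10, one
# unit of electric power at the cooling plant removes ten units of thermal
# power, which lowers the cost of thermal supply in the objective below.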
thermal_power_flow_solution_reference = fledge.thermal_grid_models.ThermalPowerFlowSolution(thermal_grid_model)
linear_thermal_grid_model = (
fledge.thermal_grid_models.LinearThermalGridModel(thermal_grid_model, thermal_power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_head_vector_minimum = 1.5 * thermal_power_flow_solution_reference.node_head_vector
branch_flow_vector_maximum = 10.0 * thermal_power_flow_solution_reference.branch_flow_vector
# Electric grid.
electric_grid_model = fledge.electric_grid_models.ElectricGridModelDefault(scenario_name)
power_flow_solution_reference = fledge.electric_grid_models.PowerFlowSolutionFixedPoint(electric_grid_model)
linear_electric_grid_model = (
fledge.electric_grid_models.LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_voltage_magnitude_vector_minimum = 0.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
node_voltage_magnitude_vector_maximum = 1.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
branch_power_magnitude_vector_maximum = 10.0 * electric_grid_model.branch_power_vector_magnitude_reference
# Energy price.
price_data = fledge.data_interface.PriceData(scenario_name)
# Obtain time step index shorthands.
scenario_data = fledge.data_interface.ScenarioData(scenario_name)
timesteps = scenario_data.timesteps
timestep_interval_hours = (timesteps[1] - timesteps[0]) / pd.Timedelta('1h')
# Invert sign of losses.
# - Power values of loads are negative by convention. Hence, sign of losses should be negative for power balance.
# Thermal grid.
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= -1.0
linear_thermal_grid_model.thermal_power_flow_solution.pump_power *= -1.0
# Electric grid.
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= -1.0
linear_electric_grid_model.power_flow_solution.loss *= -1.0
# Apply base power / voltage scaling.
# - Scale values to avoid numerical issues.
base_power = 1e6 # Base power in W (= 1 MW).
base_voltage = 1e3 # Base voltage in V (= 1 kV).
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
der_model.mapping_active_power_by_output *= 1 / base_power
der_model.mapping_reactive_power_by_output *= 1 / base_power
der_model.mapping_thermal_power_by_output *= 1 / base_power
# Thermal grid.
linear_thermal_grid_model.sensitivity_node_head_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= 1
# Electric grid.
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active *= base_power / base_voltage
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive *= base_power / base_voltage
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= 1
linear_electric_grid_model.power_flow_solution.der_power_vector *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_1 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_2 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.loss *= 1 / base_power
linear_electric_grid_model.power_flow_solution.node_voltage_vector *= 1 / base_voltage
# Limits.
node_voltage_magnitude_vector_minimum /= base_voltage
node_voltage_magnitude_vector_maximum /= base_voltage
branch_power_magnitude_vector_maximum /= base_power
# Energy price.
# - Conversion of price values from S$/kWh to S$/p.u. for convenience. Currency S$ is SGD.
# - Power values of loads are negative by convention. Hence, sign of price values is inverted here.
price_data.price_timeseries *= -1.0 * base_power / 1e3 * timestep_interval_hours
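# Worked example (illustrative, assuming 0.5 h timesteps): a price of
# 0.10 S$/kWh becomes 0.10 * (-1.0 * 1e6 / 1e3 * 0.5) = -50 S$ per p.u. of
# power per timestep, since 1 p.u. (1 MW) over 0.5 h is 500 kWh at 0.10 S$/kWh.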
# STEP 1.1: SOLVE PRIMAL PROBLEM.
if run_primal or run_kkt: # Primal constraints are also needed for KKT problem.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
primal_problem = fledge.utils.OptimizationProblem()
# Define variables.
# Flexible loads: State space vectors.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
primal_problem.state_vector = dict.fromkeys(der_model_set.flexible_der_names)
primal_problem.control_vector = dict.fromkeys(der_model_set.flexible_der_names)
primal_problem.output_vector = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
primal_problem.state_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].states)
))
)
primal_problem.control_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].controls)
))
)
primal_problem.output_vector[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
# Flexible loads: Power vectors.
primal_problem.der_thermal_power_vector = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
primal_problem.der_active_power_vector = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
primal_problem.der_reactive_power_vector = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Source variables.
primal_problem.source_thermal_power = cp.Variable((len(timesteps), 1))
primal_problem.source_active_power = cp.Variable((len(timesteps), 1))
primal_problem.source_reactive_power = cp.Variable((len(timesteps), 1))
# Define constraints.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
# Initial state.
primal_problem.constraints.append(
primal_problem.state_vector[der_model.der_name][0, :]
==
der_model.state_vector_initial.values
)
# State equation.
primal_problem.constraints.append(
primal_problem.state_vector[der_model.der_name][1:, :]
==
cp.transpose(
der_model.state_matrix.values
@ cp.transpose(primal_problem.state_vector[der_model.der_name][:-1, :])
+ der_model.control_matrix.values
@ cp.transpose(primal_problem.control_vector[der_model.der_name][:-1, :])
+ der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.iloc[:-1, :].values)
)
)
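# This enforces the discrete-time state equation x_(t+1) = A x_t + B u_t + E d_t,
# with A, B, E the state, control and disturbance matrices of the DER model.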
# Output equation.
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
==
cp.transpose(
der_model.state_output_matrix.values
@ cp.transpose(primal_problem.state_vector[der_model.der_name])
+ der_model.control_output_matrix.values
@ cp.transpose(primal_problem.control_vector[der_model.der_name])
+ der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
)
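# Correspondingly, the output equation y_t = C x_t + D u_t + F d_t maps states,
# controls and disturbances to the DER outputs.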
# Output limits.
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
>=
der_model.output_minimum_timeseries.values
)
primal_problem.constraints.append(
primal_problem.output_vector[der_model.der_name]
<=
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
)
# Power mapping.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
primal_problem.constraints.append(
primal_problem.der_active_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_active_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
primal_problem.constraints.append(
primal_problem.der_reactive_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_reactive_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
primal_problem.constraints.append(
primal_problem.der_thermal_power_vector[:, [der_index]]
==
cp.transpose(
der_model.mapping_thermal_power_by_output.values
@ cp.transpose(primal_problem.output_vector[der_model.der_name])
)
)
# Thermal grid.
# Node head limit.
primal_problem.constraints.append(
np.array([node_head_vector_minimum.ravel()])
<=
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
)
# Branch flow limit.
primal_problem.constraints.append(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
<=
np.array([branch_flow_vector_maximum.ravel()])
)
# Power balance.
primal_problem.constraints.append(
thermal_grid_model.cooling_plant_efficiency ** -1
* (
primal_problem.source_thermal_power
+ cp.sum(-1.0 * (
primal_problem.der_thermal_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
)
==
cp.transpose(
linear_thermal_grid_model.sensitivity_pump_power_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
)
)
# Electric grid.
# Voltage limits.
primal_problem.constraints.append(
np.array([node_voltage_magnitude_vector_minimum.ravel()])
<=
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([node_voltage_magnitude_vector_maximum.ravel()])
)
# Branch flow limits.
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
primal_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
# Power balance.
primal_problem.constraints.append(
primal_problem.source_active_power
+ cp.sum(-1.0 * (
primal_problem.der_active_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
==
np.real(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
primal_problem.constraints.append(
primal_problem.source_reactive_power
+ cp.sum(-1.0 * (
primal_problem.der_reactive_power_vector
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
==
np.imag(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
# Define objective.
primal_problem.objective += (
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ primal_problem.source_thermal_power
* thermal_grid_model.cooling_plant_efficiency ** -1
)
primal_problem.objective += (
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ primal_problem.source_active_power
)
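# The objective is thus the energy cost of thermal supply at the source
# (converted to an electric equivalent via the cooling plant efficiency) plus
# the cost of active power drawn at the electric source, both priced with the
# sign-inverted price time series defined above.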
if run_primal:
# Solve problem.
fledge.utils.log_time('primal solution')
primal_problem.solve()
fledge.utils.log_time('primal solution')
# Obtain results.
# Flexible loads.
primal_state_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.states)
primal_control_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.controls)
primal_output_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
for der_name in der_model_set.flexible_der_names:
primal_state_vector.loc[:, (der_name, slice(None))] = (
primal_problem.state_vector[der_name].value
)
primal_control_vector.loc[:, (der_name, slice(None))] = (
primal_problem.control_vector[der_name].value
)
primal_output_vector.loc[:, (der_name, slice(None))] = (
primal_problem.output_vector[der_name].value
)
# Thermal grid.
primal_der_thermal_power_vector = (
pd.DataFrame(
primal_problem.der_thermal_power_vector.value,
columns=linear_thermal_grid_model.thermal_grid_model.ders,
index=timesteps
)
)
primal_source_thermal_power = (
pd.DataFrame(
primal_problem.source_thermal_power.value,
columns=['total'],
index=timesteps
)
)
# Electric grid.
primal_der_active_power_vector = (
pd.DataFrame(
primal_problem.der_active_power_vector.value,
columns=linear_electric_grid_model.electric_grid_model.ders,
index=timesteps
)
)
primal_der_reactive_power_vector = (
pd.DataFrame(
primal_problem.der_reactive_power_vector.value,
columns=linear_electric_grid_model.electric_grid_model.ders,
index=timesteps
)
)
primal_source_active_power = (
pd.DataFrame(
primal_problem.source_active_power.value,
columns=['total'],
index=timesteps
)
)
primal_source_reactive_power = (
pd.DataFrame(
primal_problem.source_reactive_power.value,
columns=['total'],
index=timesteps
)
)
# Additional results.
primal_node_head_vector = (
pd.DataFrame(
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
).value,
index=timesteps,
columns=thermal_grid_model.nodes
)
)
primal_branch_flow_vector = (
pd.DataFrame(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(primal_problem.der_thermal_power_vector)
).value,
index=timesteps,
columns=thermal_grid_model.branches
)
)
primal_node_voltage_vector = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
primal_branch_power_vector_1 = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.branches
)
)
primal_branch_power_vector_2 = (
pd.DataFrame(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
primal_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
primal_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
).value,
index=timesteps,
columns=electric_grid_model.branches
)
)
primal_node_head_vector_per_unit = (
primal_node_head_vector
/ thermal_grid_model.node_head_vector_reference
)
primal_branch_flow_vector_per_unit = (
primal_branch_flow_vector
/ thermal_grid_model.branch_flow_vector_reference
)
primal_node_voltage_vector_per_unit = (
primal_node_voltage_vector * base_voltage
/ np.abs(electric_grid_model.node_voltage_vector_reference)
)
primal_branch_power_vector_1_per_unit = (
primal_branch_power_vector_1 * base_power
/ electric_grid_model.branch_power_vector_magnitude_reference
)
primal_branch_power_vector_2_per_unit = (
primal_branch_power_vector_2 * base_power
/ electric_grid_model.branch_power_vector_magnitude_reference
)
# Store results.
primal_state_vector.to_csv(os.path.join(results_path, 'primal_state_vector.csv'))
primal_control_vector.to_csv(os.path.join(results_path, 'primal_control_vector.csv'))
primal_output_vector.to_csv(os.path.join(results_path, 'primal_output_vector.csv'))
primal_der_thermal_power_vector.to_csv(os.path.join(results_path, 'primal_der_thermal_power_vector.csv'))
primal_source_thermal_power.to_csv(os.path.join(results_path, 'primal_source_thermal_power.csv'))
primal_der_active_power_vector.to_csv(os.path.join(results_path, 'primal_der_active_power_vector.csv'))
primal_der_reactive_power_vector.to_csv(os.path.join(results_path, 'primal_der_reactive_power_vector.csv'))
primal_source_active_power.to_csv(os.path.join(results_path, 'primal_source_active_power.csv'))
primal_source_reactive_power.to_csv(os.path.join(results_path, 'primal_source_reactive_power.csv'))
primal_node_head_vector.to_csv(os.path.join(results_path, 'primal_node_head_vector.csv'))
primal_branch_flow_vector.to_csv(os.path.join(results_path, 'primal_branch_flow_vector.csv'))
primal_node_voltage_vector.to_csv(os.path.join(results_path, 'primal_node_voltage_vector.csv'))
primal_branch_power_vector_1.to_csv(os.path.join(results_path, 'primal_branch_power_vector_1.csv'))
primal_branch_power_vector_2.to_csv(os.path.join(results_path, 'primal_branch_power_vector_2.csv'))
primal_node_head_vector_per_unit.to_csv(os.path.join(results_path, 'primal_node_head_vector_per_unit.csv'))
primal_branch_flow_vector_per_unit.to_csv(os.path.join(results_path, 'primal_branch_flow_vector_per_unit.csv'))
primal_node_voltage_vector_per_unit.to_csv(os.path.join(results_path, 'primal_node_voltage_vector_per_unit.csv'))
primal_branch_power_vector_1_per_unit.to_csv(os.path.join(results_path, 'primal_branch_power_vector_1_per_unit.csv'))
primal_branch_power_vector_2_per_unit.to_csv(os.path.join(results_path, 'primal_branch_power_vector_2_per_unit.csv'))
# Obtain variable count / dimensions.
primal_variable_count = (
sum(np.multiply(*primal_problem.state_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*primal_problem.control_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*primal_problem.output_vector[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ np.multiply(*primal_problem.der_thermal_power_vector.shape)
+ np.multiply(*primal_problem.der_active_power_vector.shape)
+ np.multiply(*primal_problem.der_reactive_power_vector.shape)
+ np.multiply(*primal_problem.source_thermal_power.shape)
+ np.multiply(*primal_problem.source_active_power.shape)
+ np.multiply(*primal_problem.source_reactive_power.shape)
)
print(f"primal_variable_count = {primal_variable_count}")
# Print objective.
primal_objective = pd.Series(primal_problem.objective.value, index=['primal_objective'])
primal_objective.to_csv(os.path.join(results_path, 'primal_objective.csv'))
print(f"primal_objective = {primal_objective.values}")
# STEP 1.2: SOLVE DUAL PROBLEM.
if run_dual or run_kkt: # Dual variables are also needed for KKT problem.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
dual_problem = fledge.utils.OptimizationProblem()
# Define variables.
# Flexible loads: State space equations.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
dual_problem.lambda_initial_state_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.lambda_state_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.lambda_output_equation = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.mu_output_minimum = dict.fromkeys(der_model_set.flexible_der_names)
dual_problem.mu_output_maximum = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
dual_problem.lambda_initial_state_equation[der_name] = (
cp.Variable((
1,
len(der_model_set.flexible_der_models[der_name].states)
))
)
dual_problem.lambda_state_equation[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps[:-1]),
len(der_model_set.flexible_der_models[der_name].states)
))
)
dual_problem.lambda_output_equation[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
dual_problem.mu_output_minimum[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
), nonneg=True)
)
dual_problem.mu_output_maximum[der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
), nonneg=True)
)
# Flexible loads: Power equations.
dual_problem.lambda_thermal_power_equation = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
dual_problem.lambda_active_power_equation = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
dual_problem.lambda_reactive_power_equation = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Thermal grid.
dual_problem.mu_node_head_minium = (
cp.Variable((len(timesteps), len(thermal_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_branch_flow_maximum = (
cp.Variable((len(timesteps), len(thermal_grid_model.branches)), nonneg=True)
)
dual_problem.lambda_pump_power_equation = (
cp.Variable((len(timesteps), 1))
)
# Electric grid.
dual_problem.mu_node_voltage_magnitude_minimum = (
cp.Variable((len(timesteps), len(electric_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_node_voltage_magnitude_maximum = (
cp.Variable((len(timesteps), len(electric_grid_model.nodes)), nonneg=True)
)
dual_problem.mu_branch_power_magnitude_maximum_1 = (
cp.Variable((len(timesteps), len(electric_grid_model.branches)), nonneg=True)
)
dual_problem.mu_branch_power_magnitude_maximum_2 = (
cp.Variable((len(timesteps), len(electric_grid_model.branches)), nonneg=True)
)
dual_problem.lambda_loss_active_equation = cp.Variable((len(timesteps), 1))
dual_problem.lambda_loss_reactive_equation = cp.Variable((len(timesteps), 1))
# Define constraints.
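        # - The constraint blocks below correspond to the stationarity conditions of the primal
        #   Lagrangian: the derivative with respect to each primal variable (state, control,
        #   output and power vectors) is set to zero, which is why each block is labelled
        #   "Differential with respect to ...".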
for der_model in der_model_set.flexible_der_models.values():
# Differential with respect to state vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_initial_state_equation[der_model.der_name]
- (
dual_problem.lambda_state_equation[der_model.der_name][:1, :]
@ der_model.state_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][:1, :]
@ der_model.state_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_state_equation[der_model.der_name][0:-1, :]
- (
dual_problem.lambda_state_equation[der_model.der_name][1:, :]
@ der_model.state_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][1:-1, :]
@ der_model.state_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_state_equation[der_model.der_name][-1:, :]
- (
dual_problem.lambda_output_equation[der_model.der_name][-1:, :]
@ der_model.state_output_matrix.values
)
)
)
# Differential with respect to control vector.
dual_problem.constraints.append(
0.0
==
(
- (
dual_problem.lambda_state_equation[der_model.der_name]
@ der_model.control_matrix.values
)
- (
dual_problem.lambda_output_equation[der_model.der_name][:-1, :]
@ der_model.control_output_matrix.values
)
)
)
dual_problem.constraints.append(
0.0
==
(
- (
dual_problem.lambda_output_equation[der_model.der_name][-1:, :]
@ der_model.control_output_matrix.values
)
)
)
# Differential with respect to output vector.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
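            # - Note: der_index is looked up in electric_grid_model.ders but is also used to index
            #   lambda_thermal_power_equation (whose columns follow thermal_grid_model.ders), which
            #   implicitly assumes the same DER ordering in both grid models.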
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_output_equation[der_model.der_name]
- dual_problem.mu_output_minimum[der_model.der_name]
+ dual_problem.mu_output_maximum[der_model.der_name]
- (
dual_problem.lambda_thermal_power_equation[:, [der_index]]
@ der_model.mapping_thermal_power_by_output.values
)
- (
dual_problem.lambda_active_power_equation[:, [der_index]]
@ der_model.mapping_active_power_by_output.values
)
- (
dual_problem.lambda_reactive_power_equation[:, [der_index]]
@ der_model.mapping_reactive_power_by_output.values
)
)
)
# Differential with respect to thermal power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_thermal_power_equation
- (
dual_problem.mu_node_head_minium
@ linear_thermal_grid_model.sensitivity_node_head_by_der_power
)
+ (
dual_problem.mu_branch_flow_maximum
@ linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
)
- (
dual_problem.lambda_pump_power_equation
@ (
thermal_grid_model.cooling_plant_efficiency ** -1
* np.ones(linear_thermal_grid_model.sensitivity_pump_power_by_der_power.shape)
+ linear_thermal_grid_model.sensitivity_pump_power_by_der_power
)
)
)
)
# Differential with respect to active power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_active_power_equation
- (
dual_problem.mu_node_voltage_magnitude_minimum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
)
+ (
dual_problem.mu_node_voltage_magnitude_maximum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_1
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_2
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
)
- (
dual_problem.lambda_loss_active_equation
@ (
np.ones(linear_electric_grid_model.sensitivity_loss_active_by_der_power_active.shape)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
)
)
- (
dual_problem.lambda_loss_reactive_equation
@ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
)
)
)
# Differential with respect to reactive power vector.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_reactive_power_equation
- (
dual_problem.mu_node_voltage_magnitude_minimum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_node_voltage_magnitude_maximum
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_1
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
)
+ (
dual_problem.mu_branch_power_magnitude_maximum_2
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
)
- (
dual_problem.lambda_loss_active_equation
@ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
)
- (
dual_problem.lambda_loss_reactive_equation
@ (
np.ones(linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive.shape)
+ linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
)
)
)
)
# Differential with respect to thermal source power.
dual_problem.constraints.append(
0.0
==
(
np.transpose([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
+ dual_problem.lambda_pump_power_equation
)
)
# Differential with respect to active source power.
dual_problem.constraints.append(
0.0
==
(
np.transpose([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
+ dual_problem.lambda_loss_active_equation
)
)
        # Differential with respect to reactive source power.
dual_problem.constraints.append(
0.0
==
(
dual_problem.lambda_loss_reactive_equation
)
)
if run_dual:
# Define objective.
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_initial_state_equation[der_model.der_name],
np.array([der_model.state_vector_initial.values])
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_state_equation[der_model.der_name],
cp.transpose(
der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values[:-1, :])
)
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.lambda_output_equation[der_model.der_name],
cp.transpose(
der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_output_minimum[der_model.der_name],
der_model.output_minimum_timeseries.values
))
)
dual_problem.objective += (
-1.0
* cp.sum(cp.multiply(
dual_problem.mu_output_maximum[der_model.der_name],
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
))
)
# Thermal grid.
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_head_minium,
(
np.array([node_head_vector_minimum])
# - node_head_vector_reference
# + (
# linear_thermal_grid_model.sensitivity_node_head_by_der_power
# @ der_thermal_power_vector_reference
# )
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_flow_maximum,
(
# - branch_flow_vector_reference
# + (
# linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
# @ der_thermal_power_vector_reference
# )
- 1.0
* np.array([branch_flow_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_pump_power_equation,
(
0.0
# - pump_power_reference
# + (
# linear_thermal_grid_model.sensitivity_pump_power_by_der_power
# @ der_thermal_power_vector_reference
# )
)
))
)
# Electric grid.
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_voltage_magnitude_minimum,
(
np.array([node_voltage_magnitude_vector_minimum])
- np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
+ np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_node_voltage_magnitude_maximum,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
- np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([node_voltage_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_power_magnitude_maximum_1,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([branch_power_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.mu_branch_power_magnitude_maximum_2,
(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
- np.array([branch_power_magnitude_vector_maximum])
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_loss_active_equation,
(
-1.0
* np.array([np.real(linear_electric_grid_model.power_flow_solution.loss)])
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
dual_problem.objective += (
cp.sum(cp.multiply(
dual_problem.lambda_loss_reactive_equation,
(
-1.0
* np.array([np.imag(linear_electric_grid_model.power_flow_solution.loss)])
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
+ np.transpose(
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
)
)
))
)
# Invert sign of objective for maximisation.
dual_problem.objective *= -1.0
# Solve problem.
fledge.utils.log_time('dual solution')
dual_problem.solve()
fledge.utils.log_time('dual solution')
# Obtain results.
# Flexible loads.
dual_lambda_initial_state_equation = pd.DataFrame(0.0, index=der_model_set.timesteps[:1], columns=der_model_set.states)
dual_lambda_state_equation = pd.DataFrame(0.0, index=der_model_set.timesteps[:-1], columns=der_model_set.states)
dual_lambda_output_equation = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
dual_mu_output_minimum = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
dual_mu_output_maximum = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs)
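            # - The column index of these result frames appears to be a (der_name, state/output)
            #   MultiIndex, so the block assignment `.loc[:, (der_name, slice(None))]` below fills
            #   all columns belonging to one DER at once with the corresponding variable values.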
for der_name in der_model_set.flexible_der_names:
dual_lambda_initial_state_equation.loc[:, (der_name, slice(None))] = (
dual_problem.lambda_initial_state_equation[der_name].value
)
dual_lambda_state_equation.loc[:, (der_name, slice(None))] = (
dual_problem.lambda_state_equation[der_name].value
)
dual_lambda_output_equation.loc[:, (der_name, slice(None))] = (
dual_problem.lambda_output_equation[der_name].value
)
dual_mu_output_minimum.loc[:, (der_name, slice(None))] = (
dual_problem.mu_output_minimum[der_name].value
)
dual_mu_output_maximum.loc[:, (der_name, slice(None))] = (
dual_problem.mu_output_maximum[der_name].value
)
# Flexible loads: Power equations.
dual_lambda_thermal_power_equation = (
pd.DataFrame(
dual_problem.lambda_thermal_power_equation.value,
index=timesteps,
columns=thermal_grid_model.ders
)
)
dual_lambda_active_power_equation = (
pd.DataFrame(
dual_problem.lambda_active_power_equation.value,
index=timesteps,
columns=electric_grid_model.ders
)
)
dual_lambda_reactive_power_equation = (
pd.DataFrame(
dual_problem.lambda_reactive_power_equation.value,
index=timesteps,
columns=electric_grid_model.ders
)
)
# Thermal grid.
dual_mu_node_head_minium = (
pd.DataFrame(
dual_problem.mu_node_head_minium.value,
index=timesteps,
columns=thermal_grid_model.nodes
)
)
dual_mu_branch_flow_maximum = (
pd.DataFrame(
dual_problem.mu_branch_flow_maximum.value,
index=timesteps,
columns=thermal_grid_model.branches
)
)
dual_lambda_pump_power_equation = (
pd.DataFrame(
dual_problem.lambda_pump_power_equation.value,
index=timesteps,
columns=['total']
)
)
# Electric grid.
dual_mu_node_voltage_magnitude_minimum = (
pd.DataFrame(
dual_problem.mu_node_voltage_magnitude_minimum.value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
dual_mu_node_voltage_magnitude_maximum = (
pd.DataFrame(
dual_problem.mu_node_voltage_magnitude_maximum.value,
index=timesteps,
columns=electric_grid_model.nodes
)
)
dual_mu_branch_power_magnitude_maximum_1 = (
pd.DataFrame(
dual_problem.mu_branch_power_magnitude_maximum_1.value,
index=timesteps,
columns=electric_grid_model.branches
)
)
dual_mu_branch_power_magnitude_maximum_2 = (
pd.DataFrame(
dual_problem.mu_branch_power_magnitude_maximum_2.value,
index=timesteps,
columns=electric_grid_model.branches
)
)
dual_lambda_loss_active_equation = (
pd.DataFrame(
dual_problem.lambda_loss_active_equation.value,
index=timesteps,
columns=['total']
)
)
dual_lambda_loss_reactive_equation = (
pd.DataFrame(
dual_problem.lambda_loss_reactive_equation.value,
index=timesteps,
columns=['total']
)
)
# Store results.
dual_lambda_initial_state_equation.to_csv(os.path.join(results_path, 'dual_lambda_initial_state_equation.csv'))
dual_lambda_state_equation.to_csv(os.path.join(results_path, 'dual_lambda_state_equation.csv'))
dual_lambda_output_equation.to_csv(os.path.join(results_path, 'dual_lambda_output_equation.csv'))
dual_mu_output_minimum.to_csv(os.path.join(results_path, 'dual_mu_output_minimum.csv'))
dual_mu_output_maximum.to_csv(os.path.join(results_path, 'dual_mu_output_maximum.csv'))
dual_lambda_thermal_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_thermal_power_equation.csv'))
dual_lambda_active_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_active_power_equation.csv'))
dual_lambda_reactive_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_reactive_power_equation.csv'))
dual_mu_node_head_minium.to_csv(os.path.join(results_path, 'dual_mu_node_head_minium.csv'))
dual_mu_branch_flow_maximum.to_csv(os.path.join(results_path, 'dual_mu_branch_flow_maximum.csv'))
dual_lambda_pump_power_equation.to_csv(os.path.join(results_path, 'dual_lambda_pump_power_equation.csv'))
dual_mu_node_voltage_magnitude_minimum.to_csv(os.path.join(results_path, 'dual_mu_node_voltage_magnitude_minimum.csv'))
dual_mu_node_voltage_magnitude_maximum.to_csv(os.path.join(results_path, 'dual_mu_node_voltage_magnitude_maximum.csv'))
dual_mu_branch_power_magnitude_maximum_1.to_csv(os.path.join(results_path, 'dual_mu_branch_power_magnitude_maximum_1.csv'))
dual_mu_branch_power_magnitude_maximum_2.to_csv(os.path.join(results_path, 'dual_mu_branch_power_magnitude_maximum_2.csv'))
dual_lambda_loss_active_equation.to_csv(os.path.join(results_path, 'dual_lambda_loss_active_equation.csv'))
dual_lambda_loss_reactive_equation.to_csv(os.path.join(results_path, 'dual_lambda_loss_reactive_equation.csv'))
# Obtain variable count / dimensions.
dual_variable_count = (
sum(np.multiply(*dual_problem.lambda_initial_state_equation[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.lambda_state_equation[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.lambda_output_equation[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.mu_output_minimum[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ sum(np.multiply(*dual_problem.mu_output_maximum[der_name].shape) for der_name in der_model_set.flexible_der_names)
+ np.multiply(*dual_problem.lambda_thermal_power_equation.shape)
+ np.multiply(*dual_problem.lambda_active_power_equation.shape)
+ np.multiply(*dual_problem.lambda_reactive_power_equation.shape)
+ np.multiply(*dual_problem.mu_node_head_minium.shape)
+ np.multiply(*dual_problem.mu_branch_flow_maximum.shape)
+ np.multiply(*dual_problem.lambda_pump_power_equation.shape)
+ np.multiply(*dual_problem.mu_node_voltage_magnitude_minimum.shape)
+ np.multiply(*dual_problem.mu_node_voltage_magnitude_maximum.shape)
+ np.multiply(*dual_problem.mu_branch_power_magnitude_maximum_1.shape)
+ np.multiply(*dual_problem.mu_branch_power_magnitude_maximum_2.shape)
+ np.multiply(*dual_problem.lambda_loss_active_equation.shape)
+ np.multiply(*dual_problem.lambda_loss_reactive_equation.shape)
)
print(f"dual_variable_count = {dual_variable_count}")
# Print objective.
dual_objective = pd.Series(-1.0 * dual_problem.objective.value, index=['dual_objective'])
dual_objective.to_csv(os.path.join(results_path, 'dual_objective.csv'))
print(f"dual_objective = {dual_objective.values}")
# STEP 1.3: SOLVE KKT CONDITIONS.
if run_kkt:
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
kkt_problem = fledge.utils.OptimizationProblem()
# Obtain primal and dual variables.
# - Since primal and dual variables are part of the KKT conditions, the previous definitions are recycled.
kkt_problem.state_vector = primal_problem.state_vector
kkt_problem.control_vector = primal_problem.control_vector
kkt_problem.output_vector = primal_problem.output_vector
kkt_problem.der_thermal_power_vector = primal_problem.der_thermal_power_vector
kkt_problem.der_active_power_vector = primal_problem.der_active_power_vector
kkt_problem.der_reactive_power_vector = primal_problem.der_reactive_power_vector
kkt_problem.source_thermal_power = primal_problem.source_thermal_power
kkt_problem.source_active_power = primal_problem.source_active_power
kkt_problem.source_reactive_power = primal_problem.source_reactive_power
kkt_problem.lambda_initial_state_equation = dual_problem.lambda_initial_state_equation
kkt_problem.lambda_state_equation = dual_problem.lambda_state_equation
kkt_problem.lambda_output_equation = dual_problem.lambda_output_equation
kkt_problem.mu_output_minimum = dual_problem.mu_output_minimum
kkt_problem.mu_output_maximum = dual_problem.mu_output_maximum
kkt_problem.lambda_thermal_power_equation = dual_problem.lambda_thermal_power_equation
kkt_problem.lambda_active_power_equation = dual_problem.lambda_active_power_equation
kkt_problem.lambda_reactive_power_equation = dual_problem.lambda_reactive_power_equation
kkt_problem.mu_node_head_minium = dual_problem.mu_node_head_minium
kkt_problem.mu_branch_flow_maximum = dual_problem.mu_branch_flow_maximum
kkt_problem.lambda_pump_power_equation = dual_problem.lambda_pump_power_equation
kkt_problem.mu_node_voltage_magnitude_minimum = dual_problem.mu_node_voltage_magnitude_minimum
kkt_problem.mu_node_voltage_magnitude_maximum = dual_problem.mu_node_voltage_magnitude_maximum
kkt_problem.mu_branch_power_magnitude_maximum_1 = dual_problem.mu_branch_power_magnitude_maximum_1
kkt_problem.mu_branch_power_magnitude_maximum_2 = dual_problem.mu_branch_power_magnitude_maximum_2
kkt_problem.lambda_loss_active_equation = dual_problem.lambda_loss_active_equation
kkt_problem.lambda_loss_reactive_equation = dual_problem.lambda_loss_reactive_equation
# Obtain primal and dual constraints.
# - Since primal and dual constraints are part of the KKT conditions, the previous definitions are recycled.
kkt_problem.constraints.extend(primal_problem.constraints)
kkt_problem.constraints.extend(dual_problem.constraints)
# Obtain primal and dual problem objective.
# - For testing / debugging only, since the KKT problem does not technically have any objective.
# kkt_problem.objective = primal_problem.objective
# kkt_problem.objective = dual_problem.objective
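        # - Without an objective, the KKT conditions are solved as a pure feasibility problem;
        #   the binary complementarity variables defined below turn it into a mixed-integer program.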
# Define complementarity binary variables.
kkt_problem.psi_output_minimum = dict.fromkeys(der_model_set.flexible_der_names)
kkt_problem.psi_output_maximum = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
kkt_problem.psi_output_minimum[der_name] = (
cp.Variable(kkt_problem.mu_output_minimum[der_name].shape, boolean=True)
)
kkt_problem.psi_output_maximum[der_name] = (
cp.Variable(kkt_problem.mu_output_maximum[der_name].shape, boolean=True)
)
kkt_problem.psi_node_head_minium = cp.Variable(kkt_problem.mu_node_head_minium.shape, boolean=True)
kkt_problem.psi_branch_flow_maximum = cp.Variable(kkt_problem.mu_branch_flow_maximum.shape, boolean=True)
kkt_problem.psi_node_voltage_magnitude_minimum = cp.Variable(kkt_problem.mu_node_voltage_magnitude_minimum.shape, boolean=True)
kkt_problem.psi_node_voltage_magnitude_maximum = cp.Variable(kkt_problem.mu_node_voltage_magnitude_maximum.shape, boolean=True)
kkt_problem.psi_branch_power_magnitude_maximum_1 = cp.Variable(kkt_problem.mu_branch_power_magnitude_maximum_1.shape, boolean=True)
kkt_problem.psi_branch_power_magnitude_maximum_2 = cp.Variable(kkt_problem.mu_branch_power_magnitude_maximum_2.shape, boolean=True)
# Define complementarity big M parameters.
# - Big M values are chosen based on expected order of magnitude of constraints from primal / dual solution.
kkt_problem.big_m_output_minimum = cp.Parameter(value=2e4)
kkt_problem.big_m_output_maximum = cp.Parameter(value=2e4)
kkt_problem.big_m_node_head_minium = cp.Parameter(value=1e2)
kkt_problem.big_m_branch_flow_maximum = cp.Parameter(value=1e3)
kkt_problem.big_m_node_voltage_magnitude_minimum = cp.Parameter(value=1e2)
kkt_problem.big_m_node_voltage_magnitude_maximum = cp.Parameter(value=1e2)
kkt_problem.big_m_branch_power_magnitude_maximum_1 = cp.Parameter(value=1e3)
kkt_problem.big_m_branch_power_magnitude_maximum_2 = cp.Parameter(value=1e3)
# Define complementarity constraints.
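        # - Big-M scheme used below: for a primal inequality g(x) <= 0 with multiplier mu >= 0,
        #   complementary slackness mu * g(x) = 0 is enforced through a binary variable psi via
        #   -g(x) <= psi * M and mu <= (1 - psi) * M, so either the constraint is active
        #   (g(x) = 0) or its multiplier is zero, provided M is chosen large enough.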
# Flexible loads.
for der_model in der_model_set.flexible_der_models.values():
# Output limits.
kkt_problem.constraints.append(
-1.0
* (
der_model.output_minimum_timeseries.values
- kkt_problem.output_vector[der_model.der_name]
)
<=
kkt_problem.psi_output_minimum[der_model.der_name]
* kkt_problem.big_m_output_minimum
)
kkt_problem.constraints.append(
kkt_problem.mu_output_minimum[der_model.der_name]
<=
(1 - kkt_problem.psi_output_minimum[der_model.der_name])
* kkt_problem.big_m_output_minimum
)
kkt_problem.constraints.append(
-1.0
* (
kkt_problem.output_vector[der_model.der_name]
- der_model.output_maximum_timeseries.replace(np.inf, 1e4).values
)
<=
kkt_problem.psi_output_maximum[der_model.der_name]
* kkt_problem.big_m_output_maximum
)
kkt_problem.constraints.append(
kkt_problem.mu_output_maximum[der_model.der_name]
<=
(1 - kkt_problem.psi_output_maximum[der_model.der_name])
* kkt_problem.big_m_output_maximum
)
# Thermal grid.
# Node head limit.
kkt_problem.constraints.append(
-1.0
* (
np.array([node_head_vector_minimum.ravel()])
- cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(kkt_problem.der_thermal_power_vector)
)
)
<=
kkt_problem.psi_node_head_minium
* kkt_problem.big_m_node_head_minium
)
kkt_problem.constraints.append(
kkt_problem.mu_node_head_minium
<=
(1 - kkt_problem.psi_node_head_minium)
* kkt_problem.big_m_node_head_minium
)
# Branch flow limit.
kkt_problem.constraints.append(
-1.0
* (
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(kkt_problem.der_thermal_power_vector)
)
- np.array([branch_flow_vector_maximum.ravel()])
)
<=
kkt_problem.psi_branch_flow_maximum
* kkt_problem.big_m_branch_flow_maximum
)
kkt_problem.constraints.append(
kkt_problem.mu_branch_flow_maximum
<=
(1 - kkt_problem.psi_branch_flow_maximum)
* kkt_problem.big_m_branch_flow_maximum
)
# Voltage limits.
kkt_problem.constraints.append(
-1.0
* (
np.array([node_voltage_magnitude_vector_minimum.ravel()])
- np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
- cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
<=
kkt_problem.psi_node_voltage_magnitude_minimum
* kkt_problem.big_m_node_voltage_magnitude_minimum
)
kkt_problem.constraints.append(
kkt_problem.mu_node_voltage_magnitude_minimum
<=
(1 - kkt_problem.psi_node_voltage_magnitude_minimum)
* kkt_problem.big_m_node_voltage_magnitude_minimum
)
kkt_problem.constraints.append(
-1.0
* (
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
- np.array([node_voltage_magnitude_vector_maximum.ravel()])
)
<=
kkt_problem.psi_node_voltage_magnitude_maximum
* kkt_problem.big_m_node_voltage_magnitude_maximum
)
kkt_problem.constraints.append(
kkt_problem.mu_node_voltage_magnitude_maximum
<=
(1 - kkt_problem.psi_node_voltage_magnitude_maximum)
* kkt_problem.big_m_node_voltage_magnitude_maximum
)
        # Branch power magnitude limits.
kkt_problem.constraints.append(
-1.0
* (
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
- np.array([branch_power_magnitude_vector_maximum.ravel()])
)
<=
kkt_problem.psi_branch_power_magnitude_maximum_1
* kkt_problem.big_m_branch_power_magnitude_maximum_1
)
kkt_problem.constraints.append(
kkt_problem.mu_branch_power_magnitude_maximum_1
<=
(1 - kkt_problem.psi_branch_power_magnitude_maximum_1)
* kkt_problem.big_m_branch_power_magnitude_maximum_1
)
kkt_problem.constraints.append(
-1.0
* (
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
kkt_problem.der_active_power_vector
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
kkt_problem.der_reactive_power_vector
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
- np.array([branch_power_magnitude_vector_maximum.ravel()])
)
<=
kkt_problem.psi_branch_power_magnitude_maximum_2
* kkt_problem.big_m_branch_power_magnitude_maximum_2
)
kkt_problem.constraints.append(
kkt_problem.mu_branch_power_magnitude_maximum_2
<=
(1 - kkt_problem.psi_branch_power_magnitude_maximum_2)
* kkt_problem.big_m_branch_power_magnitude_maximum_2
)
# Solve problem.
fledge.utils.log_time('KKT solution')
kkt_problem.solve()
fledge.utils.log_time('KKT solution')
# Obtain results.
# Flexible loads.
kkt_state_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.states)
kkt_control_vector = pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.controls)
kkt_output_vector = | pd.DataFrame(0.0, index=der_model_set.timesteps, columns=der_model_set.outputs) | pandas.DataFrame |
import logging
import pandas as pd
import dataiku
from dataiku.runnables import ResultTable
import datetime
import adal
import requests
import re
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO,
                    format='azure ad plugin %(levelname)s - %(message)s')
class AzureClient(object):
MANDATORY_COLUMNS = ["dss_group_name", "aad_group_name", "dss_profile"]
V103_REMAPPING_PRESETS = [{'from': '@', 'to': '_'}, {'from': '#', 'to': '_'}]
# Relevant URLs
authority_url = "https://login.microsoftonline.com/"
graph_url = "https://graph.microsoft.com/"
graph_group_url = "https://graph.microsoft.com/v1.0/groups?$filter=displayName eq '{}'&$select=id"
graph_members_url = "https://graph.microsoft.com/v1.0/groups/{}/members?$select=displayName,userPrincipalName"
# Define a translation dict that specifies how each credential should
# be named in the user's secrets
credentials_labels = {
"graph_tenant_id": "Tenant ID",
"graph_app_id": "Application ID",
"graph_app_secret": "App secret",
"graph_app_cert": "App certificate",
"graph_app_cert_thumb": "App certificate thumbprint",
"graph_user": "User principal",
"graph_user_pwd": "<PASSWORD>",
}
def __init__(self, project_key, config):
self.project_key = project_key
self.login_remapping = config.get("login_remapping", self.V103_REMAPPING_PRESETS)
self.assert_valid_login_remapping()
self.azure_ad_connection = config.get("azure_ad_connection", {})
self.flag_simulate = config.get("flag_simulate")
self.auth_method = self.azure_ad_connection.get("auth_method")
# Read the group configuration data from DSS
self.groups_dataset = config.get("groups_dataset", None)
if not self.groups_dataset:
raise Exception("No groups dataset has been selected.")
groups_dataset_handle = dataiku.Dataset(self.groups_dataset, self.project_key)
self.groups_df = groups_dataset_handle.get_dataframe()
self.client = dataiku.api_client()
self.run_user = self.client.get_auth_info()["authIdentifier"]
self.possible_dss_profiles = self.get_possible_dss_profiles()
self.session = requests.Session()
# Initialize a dataframe that will contain log data
self.log_df = pd.DataFrame(columns=["date", "user", "type", "message"])
# Configure auth method
self.required_credentials = self.get_required_credentials(
self.azure_ad_connection.get("auth_method")
)
# Read credentials
if self.azure_ad_connection.get("flag_user_credentials"):
self.credentials = self.get_credentials("user")
else:
self.credentials = self.get_credentials("parameters")
# Connect to Graph API
self.set_session_headers()
def assert_valid_login_remapping(self):
for login_remapping in self.login_remapping:
to_value = login_remapping.get("to")
if to_value and not self.is_valid_login(to_value):
raise Exception("'{}' in the login remapping is not a valid character for a DSS user login. Valid characters must match the regex pattern [a-zA-Z0-9@.+_-]".format(to_value))
@staticmethod
def is_valid_login(strg, search=re.compile(r'[^a-zA-Z0-9@.+_-]').search):
return not bool(search(strg))
def get_possible_dss_profiles(self):
self.available_dss_profiles = self.get_available_dss_profiles()
ordered_dss_profiles = self.groups_df["dss_profile"].tolist()
self.ranked_dss_profiles = []
for profile in ordered_dss_profiles:
if profile in self.available_dss_profiles and profile not in self.ranked_dss_profiles:
self.ranked_dss_profiles.append(profile)
return self.ranked_dss_profiles
def get_user_id(self, email):
"""
Creates a user ID based on an email address.
:param email: the email address
"""
for login_remapping in self.login_remapping:
from_char = login_remapping.get("from")
if from_char:
email = email.replace(from_char, login_remapping.get("to", ""))
return email
@staticmethod
def list_diff(list1, list2):
"""Return elements of list1 that are not present in list2."""
return list(set(list1) - set(list2))
def get_dss_profile(self, dss_profile_list):
"""
Given an list of dss_profile types, return the most potent dss_profile.
:param dss_profile_list: a list with dss_profiles
"""
# For each dss_profile type, going from most to least potent, see if it is present in the list.
# If so, return it as the assigned dss_profile type.
for dss_profile_type in self.possible_dss_profiles:
if dss_profile_type in dss_profile_list:
return dss_profile_type
# If no match was found above, default to no dss_profile
return "NONE"
def get_available_dss_profiles(self):
licensing = self.client.get_licensing_status()
user_profiles = licensing.get('base', []).get('userProfiles', [])
user_profiles.append("NONE")
return user_profiles
@staticmethod
def get_required_credentials(auth_method):
"""Determine which credentials are required, based on the authentication method.
:param auth_method: the selected authentication method
"""
required_credentials = ["graph_tenant_id", "graph_app_id"]
if auth_method == "auth_app_token":
required_credentials.extend(["graph_app_secret"])
elif auth_method == "auth_app_cert":
required_credentials.extend(["graph_app_cert", "graph_app_cert_thumb"])
elif auth_method == "auth_user_pwd":
required_credentials.extend(["graph_user", "graph_user_pwd"])
return required_credentials
def validate_groups_df(self):
"""Verifies that the groups data contains the correct columns and dss_profile types."""
# Validate existence of correct columns
column_names = list(self.groups_df.columns)
self.assert_mandatory_columns(column_names)
# Validate content of dss_profile column
dss_profile_values = list(self.groups_df["dss_profile"].unique())
impossible_dss_profiles = self.list_diff(dss_profile_values, self.possible_dss_profiles)
if impossible_dss_profiles:
raise Exception("Invalid dss_profile types were found in the groups configuration: {}. Valid dss_profile values are: {}".format(
impossible_dss_profiles,
self.possible_dss_profiles
)
)
def assert_mandatory_columns(self, column_names):
for mandatory_column in self.MANDATORY_COLUMNS:
if mandatory_column not in column_names:
raise Exception("The groups dataset is not correctly configured. {} is missing".format(mandatory_column))
def get_credentials(self, source):
"""
        Returns a dictionary containing the credentials for the ADAL call to MS Graph.
:param source: where the credentials are taken from, either 'user' or 'parameters'
"""
# Empty list for missing credentials
missing_credentials = []
# Dictionary for present credentials
credentials = {}
if source == "user":
# Load secrets from user profile [{key: value} ...]
user_secrets = self.client.get_auth_info(with_secrets=True)["secrets"]
secrets_dict = {secret["key"]: secret["value"] for secret in user_secrets}
else:
secrets_dict = self.azure_ad_connection
# get token = secrets_dict.get("azure_ad_credentials")
# For each required credential, check whether it is present
for key in self.required_credentials:
label = self.credentials_labels[key]
try:
if source == "user":
credentials[key] = secrets_dict[label]
else: # source == "parameters":
credentials[key] = secrets_dict[key]
if not credentials[key]:
raise KeyError
except (KeyError, IndexError):
missing_credentials.append(label)
if missing_credentials:
raise KeyError("Please specify these credentials: {}".format(missing_credentials))
return credentials
def add_log(self, message, log_type="INFO"):
"""
Add a record to the logging dataframe.
:param message: The text to be logged
:param log_type: The message type, 'INFO' by default.
"""
new_log = {
"date": str(datetime.datetime.now()),
"user": self.run_user,
"type": log_type,
"message": message,
}
self.log_df = self.log_df.append(new_log, ignore_index=True)
def clear_log(self):
"""
Empties the log. Useful for testing.
"""
self.log_df = | pd.DataFrame(columns=["date", "user", "type", "message"]) | pandas.DataFrame |
import pandas
import glob
import urllib.request
url = 'http://example.com/'
open_data_ms = pandas.read_csv(urllib.request.urlopen("https://raw.githubusercontent.com/od-ms/resources/master/coronavirus-fallzahlen-regierungsbezirk-muenster.csv"))
open_data_ms['Datum'] = pandas.to_datetime(open_data_ms['Datum'], format='%d.%m.%Y')
open_data_ms = open_data_ms.sort_values(by='Datum')
confirmed_df = pandas.DataFrame({'Kommune': open_data_ms['Gebiet'].unique()})
recovered_df = pandas.DataFrame({'Kommune': open_data_ms['Gebiet'].unique()})
deaths_df = pandas.DataFrame({'Kommune': open_data_ms['Gebiet'].unique()})
confirmed_df = confirmed_df.set_index(['Kommune'], drop=True)
recovered_df = recovered_df.set_index(['Kommune'], drop=True)
deaths_df = deaths_df.set_index(['Kommune'], drop=True)
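# The loops below pivot the long-format case data (one row per date and municipality) into
# wide tables with one column per date: confirmed cases ('Bestätigte Faelle') and, presumably
# by the same pattern, recovered cases and deaths per municipality ('Gebiet').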
for day in open_data_ms['Datum'].dt.date.unique():
for kommune in open_data_ms['Gebiet'].unique():
all_data_kommune = open_data_ms[open_data_ms['Gebiet'] == kommune]
c = all_data_kommune[all_data_kommune['Datum'] == pandas.Timestamp(day)]['Bestätigte Faelle'].values
r = all_data_kommune[all_data_kommune['Datum'] == | pandas.Timestamp(day) | pandas.Timestamp |
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Bio import pairwise2
from scipy import interp
from scipy.stats import linregress
from sklearn.metrics import roc_curve, auc, precision_recall_curve
import thoipapy
import thoipapy.validation.bocurve
from thoipapy.utils import make_sure_path_exists
def collect_indiv_validation_data(s, df_set, logging, namedict, predictors, THOIPA_predictor_name, subsets):
"""
Parameters
----------
s
df_set
logging
namedict
predictors
THOIPA_predictor_name
Returns
-------
"""
logging.info("start collect_indiv_validation_data THOIPA_PREDDIMER_TMDOCK")
ROC_AUC_df = pd.DataFrame()
PR_AUC_df = pd.DataFrame()
mean_o_minus_r_by_sample_df = pd.DataFrame()
AUBOC_from_complete_data_ser = pd.Series()
AUC_AUBOC_name_list = []
linechar_name_list = []
AUBOC_list = []
df_o_minus_r_mean_df = pd.DataFrame()
roc_auc_mean_list = []
roc_auc_std_list = []
# indiv_validation_dir: Path = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation"
indiv_validation_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/indiv_validation_data.xlsx"
thoipapy.utils.make_sure_path_exists(indiv_validation_data_xlsx, isfile=True)
# if not os.path.isdir(os.path.dirname(BOAUC10_barchart_pdf)):
# os.makedirs(os.path.dirname(BOAUC10_barchart_pdf))
for predictor in predictors:
BO_data_df = pd.DataFrame()
xv_dict = {}
ROC_AUC_dict = {}
PR_AUC_dict = {}
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
auc_pkl = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/roc_auc/{predictor}/ROC_AUC_data.pkl"
BO_curve_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/BO_Curve_data.csv"
bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/bocurve_data.xlsx"
BO_linechart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/BO_linechart.png"
BO_barchart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/AUBOC_barchart.png"
df_o_minus_r_mean_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/df_o_minus_r_mean.csv"
thoipapy.utils.make_sure_path_exists(auc_pkl, isfile=True)
thoipapy.utils.make_sure_path_exists(BO_curve_data_csv, isfile=True)
for i in df_set.index:
sys.stdout.write(".")
sys.stdout.flush()
acc = df_set.loc[i, "acc"]
database = df_set.loc[i, "database"]
acc_db = acc + "-" + database
merged_data_csv_path: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/predictions/merged/{database}.{acc}.merged.csv"
merged_data_df = pd.read_csv(merged_data_csv_path, engine="python")
# invert some predictors so that a high number always indicates a predicted interface residue
merged_data_df["LIPS_L*E"] = -1 * merged_data_df["LIPS_L*E"]
merged_data_df["PREDDIMER"] = -1 * merged_data_df["PREDDIMER"]
merged_data_df["TMDOCK"] = -1 * merged_data_df["TMDOCK"]
if database == "crystal" or database == "NMR":
# invert the interface score of structural data so that a high number indicates an interface residue
merged_data_df["interface_score"] = -1 * merged_data_df["interface_score"]
# toggle whether to use boolean (interface) or continuous data (interface_score). Here we want continuous data
experiment_col = "interface_score"
BO_single_prot_df = thoipapy.validation.bocurve.calc_best_overlap_from_selected_column_in_df(acc_db, merged_data_df, experiment_col, predictor)
if BO_data_df.empty:
BO_data_df = BO_single_prot_df
else:
BO_data_df = pd.concat([BO_data_df, BO_single_prot_df], axis=1, join="outer")
df_for_roc = merged_data_df.dropna(subset=[experiment_col, predictor])
fpr, tpr, thresholds = roc_curve(df_for_roc.interface, df_for_roc[predictor], drop_intermediate=False)
precision, recall, thresholds_PRC = precision_recall_curve(df_for_roc.interface, df_for_roc[predictor])
pr_auc = auc(recall, precision)
PR_AUC_dict[acc_db] = pr_auc
roc_auc = auc(fpr, tpr)
ROC_AUC_dict[acc_db] = roc_auc
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
xv_dict[acc_db] = {"fpr": fpr, "tpr": tpr, "roc_auc": roc_auc, "precision": precision, "recall": recall, "pr_auc": pr_auc}
# save dict as pickle
with open(auc_pkl, "wb") as f:
pickle.dump(xv_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
BO_data_df.to_csv(BO_curve_data_csv)
# parse BO data csv
# print out mean values
thoipapy.validation.bocurve.parse_BO_data_csv_to_excel(BO_curve_data_csv, bocurve_data_xlsx, s["n_residues_AUBOC_validation"], logging, predictor)
# ROC AUC validation
ROC_AUC_ser = pd.Series(ROC_AUC_dict)
ROC_AUC_ser.sort_values(inplace=True, ascending=False)
roc_auc_mean_list.append(ROC_AUC_ser.mean())
roc_auc_std_list.append(ROC_AUC_ser.std())
# precision-recall AUC validation
PR_AUC_ser = pd.Series(PR_AUC_dict)
PR_AUC_ser.sort_values(inplace=True, ascending=False)
# BO curve AUBOC validation
mean_o_minus_r_by_sample_ser = pd.read_excel(bocurve_data_xlsx, sheet_name="mean_o_minus_r_by_sample", index_col=0)["mean_o_minus_r_by_sample"].copy()
df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
df_o_minus_r.columns = pd.Series(df_o_minus_r.columns).replace(namedict)
df_o_minus_r_mean = df_o_minus_r.T.mean()
# df_o_minus_r_mean_df= pd.concat([df_o_minus_r_mean_df,df_o_minus_r_mean],axis=1, join="outer")
df_o_minus_r_mean_df[predictor] = df_o_minus_r_mean
# apply cutoff (e.g. 5 residues for AUBOC5)
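        # - AUBOC: np.trapz integrates the mean observed-minus-random overlap over the first
        #   n_residues_AUBOC_validation residues, i.e. the area under the best-overlap curve.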
auboc_ser = df_o_minus_r_mean.iloc[:s["n_residues_AUBOC_validation"]]
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
AUBOC_list.append(AUBOC)
AUBOC_from_complete_data_ser[predictor] = AUBOC
linechar_name_list.append(predictor)
AUC_AUBOC_name_list.append("{}-AUC".format(predictor))
AUC_AUBOC_name_list.append("{}-AUBOC".format(predictor))
thoipapy.figs.create_BOcurve_files.save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict,
logging, ROC_AUC_ser)
ROC_AUC_df[predictor] = ROC_AUC_ser
PR_AUC_df[predictor] = PR_AUC_ser
mean_o_minus_r_by_sample_df[predictor] = mean_o_minus_r_by_sample_ser
means_df = pd.DataFrame()
means_df["ROC_AUC"] = ROC_AUC_df.mean()
means_df["PR_AUC"] = PR_AUC_df.mean()
means_df["mean_o_minus_r_by_sample"] = mean_o_minus_r_by_sample_df.mean()
means_df["AUBOC_from_complete_data"] = AUBOC_from_complete_data_ser
""" means_df looks like this:
ROC_AUC PR_AUC AUBOC
THOIPA_5_LOO 0.629557 0.505823 1.202355
PREDDIMER 0.566582 0.416761 0.515193
TMDOCK 0.598387 0.421462 0.666720
"""
std_df = pd.DataFrame()
std_df["ROC_AUC"] = ROC_AUC_df.std()
std_df["PR_AUC"] = PR_AUC_df.std()
std_df["mean_o_minus_r_by_sample"] = mean_o_minus_r_by_sample_df.std()
SEM_df = pd.DataFrame()
SEM_df["ROC_AUC"] = ROC_AUC_df.std() / np.sqrt(ROC_AUC_df.shape[0])
SEM_df["PR_AUC"] = PR_AUC_df.std() / np.sqrt(PR_AUC_df.shape[0])
SEM_df["mean_o_minus_r_by_sample"] = mean_o_minus_r_by_sample_df.std() / np.sqrt(mean_o_minus_r_by_sample_df.shape[0])
with pd.ExcelWriter(indiv_validation_data_xlsx) as writer:
means_df.to_excel(writer, sheet_name="means")
std_df.to_excel(writer, sheet_name="std")
SEM_df.to_excel(writer, sheet_name="SEM")
ROC_AUC_df.to_excel(writer, sheet_name="ROC_AUC_indiv")
PR_AUC_df.to_excel(writer, sheet_name="PR_AUC_indiv")
# mean_o_minus_r_by_sample_df.to_excel(writer, sheet_name="BO_AUBOC_indiv")
mean_o_minus_r_by_sample_df.to_excel(writer, sheet_name="mean_o_minus_r_by_sample")
df_o_minus_r_mean_df.to_excel(writer, sheet_name="BO_o_minus_r")
if "TMDOCK" in PR_AUC_df.columns and "PREDDIMER" in PR_AUC_df.columns:
df_THOIPA_vs_others = pd.DataFrame()
df_THOIPA_vs_others["THOIPA_better_TMDOCK"] = PR_AUC_df[THOIPA_predictor_name] > PR_AUC_df.TMDOCK
df_THOIPA_vs_others["THOIPA_better_PREDDIMER"] = PR_AUC_df[THOIPA_predictor_name] > PR_AUC_df.PREDDIMER
df_THOIPA_vs_others["THOIPA_better_both"] = df_THOIPA_vs_others[["THOIPA_better_TMDOCK", "THOIPA_better_PREDDIMER"]].sum(axis=1) == 2
n_THOIPA_better_both = df_THOIPA_vs_others["THOIPA_better_both"].sum()
logging.info("THOIPA has higher precision-recall AUC than both TMDOCK and PREDDIMER for {}/{} proteins in {}".format(n_THOIPA_better_both, PR_AUC_df.shape[0], s["setname"]))
df_THOIPA_vs_others.to_excel(writer, sheet_name="THOIPA_vs_others")
# #sys.stdout.write(roc_auc_mean_list)
# AUBOC_mean_df = pd.DataFrame.from_records([AUBOC_list], columns=linechar_name_list)
# #AUBOC_mean_df.to_csv(mean_AUBOC_file)
# AUBOC_mean_df.to_excel(writer, sheet_name="AUBOC_mean")
# df_o_minus_r_mean_df.columns = linechar_name_list
# #ROC_AUC_df.columns = AUC_AUBOC_name_list
# ROC_AUC_df.index.name = "acc_db"
# #ROC_AUC_df.to_csv(AUC_AUBOC_file)
# THOIPA_best_set = s["THOIPA_best_set"]
#
# # AUC for barchart, 4 predictors, mean AUC of all proteins in dataset
# #logging.info("_finder : {}".format(mean_roc_auc_barchart_csv))
# AUC_4pred_mean_all_indiv_prot_df = pd.DataFrame(index = linechar_name_list)
# #AUC_4pred_mean_all_indiv_prot_df = pd.DataFrame([roc_auc_mean_list, roc_auc_std_list], index = linechar_name_list, columns=["mean", "std"])
# AUC_4pred_mean_all_indiv_prot_df["roc_auc_mean"] = roc_auc_mean_list
# AUC_4pred_mean_all_indiv_prot_df["roc_auc_std"] = roc_auc_std_list
# AUC_4pred_mean_all_indiv_prot_df["n"] = df_set.shape[0]
# AUC_4pred_mean_all_indiv_prot_df["SEM"] = AUC_4pred_mean_all_indiv_prot_df.roc_auc_std / AUC_4pred_mean_all_indiv_prot_df["n"].apply(np.sqrt)
# #AUC_4pred_mean_all_indiv_prot_df.to_csv(mean_roc_auc_barchart_csv)
# AUC_4pred_mean_all_indiv_prot_df.to_excel(writer, sheet_name="ROC_AUC_mean_indiv")
def create_indiv_validation_figs(s, logging, namedict, predictors, THOIPA_predictor_name, subsets):
perc_interf_vs_PR_cutoff_linechart_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/perc_interf_vs_PR_cutoff_linechart_data.csv"
indiv_validation_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/indiv_validation_data.xlsx"
indiv_validation_figs_dir: Path = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/figs"
make_sure_path_exists(indiv_validation_figs_dir)
indiv_ROC_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "indiv_ROC_AUC_barchart.png"
indiv_PR_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "indiv_PR_AUC_barchart.png"
AUBOC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "indiv_AUBOC_barchart.png"
BOCURVE_linechart_png: Union[Path, str] = indiv_validation_figs_dir / "BOcurve_linechart.png"
mean_ROC_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "mean_ROC_AUC_barchart.png"
mean_PR_AUC_barchart_png: Union[Path, str] = indiv_validation_figs_dir / "mean_PR_AUC_barchart.png"
ROC_AUC_vs_PR_AUC_scatter_png: Union[Path, str] = indiv_validation_figs_dir / "ROC_AUC_vs_PR_AUC_scatter.png"
perc_interf_vs_PR_cutoff_linechart_png: Union[Path, str] = indiv_validation_figs_dir / "perc_interf_vs_PR_cutoff_linechart.png"
ROC_AUC_df = pd.read_excel(indiv_validation_data_xlsx, sheet_name="ROC_AUC_indiv", index_col=0)
PR_AUC_df = pd.read_excel(indiv_validation_data_xlsx, sheet_name="PR_AUC_indiv", index_col=0)
mean_o_minus_r_by_sample_df = | pd.read_excel(indiv_validation_data_xlsx, sheet_name="mean_o_minus_r_by_sample", index_col=0) | pandas.read_excel |
import numpy
import matplotlib.pyplot as plt
import tellurium as te
from rrplugins import Plugin
auto = Plugin("tel_auto2000")
from te_bifurcation import model2te, run_bf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
sf = ScalarFormatter()
sf.set_scientific(False)
import re
import seaborn as sns
import os
from pickle import dump, load
from sympy import *
import lhsmdu
import sobol_seq
import pickle
# Define symbolic variables for symbolic Jacobian
R, r, C1, C2, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, sR, a1, a2, b1, b2, A = symbols('R r C1 C2 mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k s_R a1 a2 b1 b2 A', positive=True, real=True)
c1A, c1B, c2, rev, koff, kR, sR0, sR, g, s, C = symbols('c1A c1B c2 rev koff kR sR0 sR g s C', positive=True, real=True)
R, r, C, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, kR, A = \
symbols('R r C mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k k_R A', positive=True, real=True)
# Samples of parameter values
n = int(1E2) # Production run 1E5
ss = sobol_seq.i4_sobol_generate(4, n)
l = np.power(2, -3 + (4+3)*ss[:,:2])
a1sp, b1sp = l[:,0], l[:,1]
Ksp = 10**(ss[:,-2]*(np.log10(70000)-np.log10(7)) + np.log10(7))
gsp = 10**(ss[:,-1]*(np.log10(2)-np.log10(0.02)) + np.log10(0.02))
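# Quasi-random (Sobol) samples mapped to log-uniform parameter ranges:
# a1, b1 in [2**-3, 2**4], K in [7, 7e4] and g in [0.02, 2].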
# Model definition
model_mmi1_full = {
'pars': {
'sR': 0.0,
'a1' : 1,
'b1' : 1,
'sR0': 0.0,
'g': 1.0,
'K' : 10000,
'koff': 100,
},
'vars': {
'r': '1 - koff*K*R*r + koff*C - g*r + a1*C',
'R': 'sR0 + sR - koff*K*R*r + koff*C - R + b1*g*C',
'C': 'koff*K*R*r - koff*C - a1*C - b1*g*C',
},
'fns': {}, 'aux': [], 'name': 'mmi1_full'}
ics_1_mmi1_full = {'r': 0.9, 'R': 0.0, 'C': 0.0}
# Symbolic Jacobian
eqnD = {}
for k, v in model_mmi1_full['vars'].items():
eqnD[k] = parsing.sympy_parser.parse_expr(v, locals())
JnD = Matrix([eqnD['R'], eqnD['r'], eqnD['C']]).jacobian(Matrix([R, r, C]))
fJnD = lambdify((K, R, r, C, a1, b1, g, koff), JnD, 'numpy')
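# fJnD evaluates the symbolic Jacobian numerically along each continuation branch; eigenvalues
# with nonzero imaginary parts are used further below to flag spiral sinks (the `oui` list),
# while Hopf bifurcation points come from the AUTO2000 output (`boundsh` / TY == 3).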
# Tellurium object
r = model2te(model_mmi1_full, ics=ics_1_mmi1_full)
uplim = 120
if 1:
# A new run
hb_cts, hbi, hbnds = 0, [], []
data_all = []
inuerr = []
for i in range(n):
print(i)
for j, p in enumerate(['a1', 'b1']):
r[p] = l[i,j]
r['g'], r['K'] = gsp[i], Ksp[i]
data, bounds, boundsh = run_bf(r, auto, dirc="+", par="sR", lims=[0,uplim],
ds=1E-2, dsmin=1E-5, dsmax=0.1)
if data.r.iloc[-1] < -1:
data, bounds, boundsh = run_bf(r, auto, dirc="+", par="sR", lims=[0,uplim],
ds=1E-2, dsmin=1E-5, dsmax=0.01)
data_all.append(data)
if len(boundsh) > 0:
print('HB point found')
hb_cts += 1
hbi.append(i)
hbnds.append(boundsh)
if 1: # Save the output
fn = './te_data/bf_data_MMI1.tebf'
specs = {'model':model_mmi1_full, 'n':n, 'uplim':uplim, 'Ksp':Ksp,
'gsp':gsp,
'a1sp':a1sp, 'b1sp':b1sp }
with open(fn, 'wb') as f:
pickle.dump({'data_all': data_all, 'specs': specs}, f)
print('Sets with HB: ', hb_cts)
print('Numerical errors', len(inuerr))
else:
# Reading a single file
fn = './te_data/bf_data_MMI1.tebf'
print('Reading', fn)
with open(fn, 'rb') as f:
f_cont = pickle.load(f)
data_all, specs = f_cont['data_all'], f_cont['specs']
n, uplim, Ksp, gsp = specs['n'], specs['uplim'], specs['Ksp'], specs['gsp']
a1sp, b1sp = specs['a1sp'], specs['b1sp']
print('Curves: '+str(n)+'\t','uplim: '+str(uplim))
for sp in ['Ksp', 'gsp', 'a1sp', 'b1sp']:
print(sp + ' is between %.4f and %.4f'%(specs[sp].min(), specs[sp].max()))
print('\n')
# More detailed analysis of the continuation output
oui = [] # Spiral sinks
hbi = [] # Hopf
mxi = [] # Hopf and SN
inuerr = []
binned_Rs = []
binned_Rts = []
binned_cons = []
hist_imag = np.zeros(60)
nR = 62
do_pars = []
for i, data in enumerate(data_all[:]):
if ((i+1) % 10000) == 0:
print(i+1)
if len(data) == 0:
inuerr.append(i)
continue
if data.PAR.iloc[-1] < (uplim-1) or data.PAR.iloc[-1] > (uplim+1):
mxi.append(i)
if (data.TY == 3).sum()>0:
hbi.append(i)
Rsp, rsp, Csp = data.R.values, data.r.values, data.C.values
JnDsp = fJnD(Ksp[i], Rsp, rsp, Csp, a1sp[i], b1sp[i], gsp[i], 100.0)
Jsp = np.zeros((JnDsp.shape[0], JnDsp.shape[0], Rsp.shape[0]))
for p in range(JnDsp.shape[0]):
for q in range(JnDsp.shape[1]):
Jsp[p,q,:] = JnDsp[p,q]
Jsp = np.swapaxes(np.swapaxes(Jsp, 0, 2), 1,2)
w, v = np.linalg.eig(Jsp)
#print(w)
if_imag = np.imag(w) != 0
imags = ((if_imag).sum(axis=1)>0) & (Rsp>-10) & (rsp>-10)
igt = np.where(Rsp>0.01)[0]
if (len(igt) > 0):
sRthr = data.PAR[igt[0]]
std_sigs = np.linspace(sRthr*0.0, sRthr*3.1, nR)
ids = np.searchsorted(data.PAR, std_sigs)
binned_R, binned_Rt = np.empty(nR), np.empty(nR)
binned_R[:], binned_Rt[:] = np.NaN, np.NaN
R_data = Rsp[[x for x in ids if x < Rsp.size]]
Rt_data = R_data + Csp[[x for x in ids if x < Rsp.size]]
binned_R[:R_data.size] = R_data
binned_Rt[:R_data.size] = Rt_data
binned_Rs.append(binned_R)
binned_Rts.append(binned_Rt)
binned_cons.append(std_sigs)
if imags.sum() > 0:
if (a1sp[i]>1 and b1sp[i]>1) or (a1sp[i]<1 and b1sp[i]<1):
continue
rmax, imax = np.real(w).min(axis=1), np.imag(w).max(axis=1)
oui.append(i)
imagi = np.where(imags>0)[0]
if len(igt) > 0:
hs, bins = np.histogram(data.PAR[imagi], bins=np.linspace(sRthr*0.0, sRthr*3.0, hist_imag.size+1))
hist_imag = hist_imag + ((hs>0)+0)
fig, ax = plt.subplots(figsize=(3,3))
fig.subplots_adjust(bottom=0.2, right=0.78, left=0.15)
ax2 = ax.twinx()
ax2.bar(range(hist_imag.size), hist_imag/n, color='y', zorder=-10, width=1.0, alpha=0.5)
dfl = pd.DataFrame(binned_Rs).melt()
sns.lineplot(x="variable", y="value", data=dfl, color='k', ax=ax, ci=99.9, palette="flare")
ax.set_ylabel(r'Steady state $\it{R}$ (A.U.)')
ax.set_xlabel(r'$\sigma_R$')
ax.set_xticks([0, 20, 40, 60])
ltr = r'$\hat{\it{\sigma_R}}$'
ax.set_xticklabels([0, ltr, r'2$\times$'+ltr, r'3$\times$'+ltr])
ax.set_xlim(0, 40)
ax.spines['right'].set_color('y')
ax2.spines['right'].set_color('y')
ax2.yaxis.label.set_color('y')
ax2.tick_params(axis='y', colors='y')
ax2.set_ylabel(r'Frequency (spiral sink)')
plt.show()
figc, axc = plt.subplots(figsize=(4,3))
figc.subplots_adjust(bottom=0.2, right=0.90, left=0.25)
sns.lineplot(x="variable", y="value", data=dfl, color='k', ax=axc, ci=99.9, palette="flare", label=r'$\it{R}$')
dft = pd.DataFrame(binned_Rts).melt()
sns.lineplot(x="variable", y="value", data=dft, color='m', ax=axc, ci=99.9, palette="flare", label=r'$\it{R}\rm{_T}$')
dfc = pd.DataFrame(binned_cons).melt()
sns.lineplot(x="variable", y="value", data=dfc, color='b', ax=axc, ci=99.9, palette="flare", label=r'$\it{R}\rm{_T} (=\it{R})$'+'\nw/o microRNA')
axc.set_ylabel('Steady state mRNA\nconcentration (A.U.)')
axc.set_xlabel(r'$\sigma_R$')
axc.set_yscale('log')
axc.set_ylim(1E-4, 60)
axc.set_xticks([0, 20, 40, 60])
ltr = r'$\hat{\it{\sigma_R}}$'
axc.set_xticklabels([0, ltr, r'2$\times$'+ltr, r'3$\times$'+ltr])
axc.set_xlim(0, 40)
axc.set_yscale('log')
axc.set_ylim(1E-4, 60)
axc.legend()
plt.show()
bi = list(set(range(n)) - set(oui))
astr, bstr, gstr, Kstr = r'$\it{\alpha}$', r'$\it{\beta}$', r'$\it{\gamma}$', r'$1/\it{K}$'
cat_strs = [r'Stable node for all $\it{\sigma_R}$', r'Spiral sink for some $\it{\sigma_R}$']
dfb = pd.DataFrame.from_dict({astr: a1sp[bi], bstr: b1sp[bi], gstr: gsp[bi], Kstr: Ksp[bi], 'Category': cat_strs[0]})
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
from hydra.utils import to_absolute_path
from joblib import Parallel, delayed
from sklearn.cluster import KMeans
from tqdm import tqdm
data_dir = to_absolute_path("../../input/optiver-realized-volatility-prediction/") + "/"
# Function to calculate first WAP
def calc_wap1(df: pd.DataFrame) -> pd.Series:
wap = (df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]) / (
df["bid_size1"] + df["ask_size1"]
)
return wap
# Function to calculate second WAP
def calc_wap2(df: pd.DataFrame) -> pd.Series:
wap = (df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]) / (
df["bid_size2"] + df["ask_size2"]
)
return wap
def calc_wap3(df: pd.DataFrame) -> pd.Series:
wap = (df["bid_price1"] * df["bid_size1"] + df["ask_price1"] * df["ask_size1"]) / (
df["bid_size1"] + df["ask_size1"]
)
return wap
def calc_wap4(df: pd.DataFrame) -> pd.Series:
wap = (df["bid_price2"] * df["bid_size2"] + df["ask_price2"] * df["ask_size2"]) / (
df["bid_size2"] + df["ask_size2"]
)
return wap
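# --- Worked example (illustrative only, not part of the original pipeline) -----------------
# A single synthetic book snapshot showing what the four WAP variants return: wap1/wap2
# weight each price by the *opposite* side's size, wap3/wap4 by the *same* side's size.
if __name__ == "__main__":
    _book = pd.DataFrame(
        {
            "bid_price1": [99.0], "ask_price1": [101.0], "bid_size1": [10.0], "ask_size1": [30.0],
            "bid_price2": [98.0], "ask_price2": [102.0], "bid_size2": [20.0], "ask_size2": [40.0],
        }
    )
    # wap1 = (99*30 + 101*10) / (10 + 30) = 99.5, pulled toward the bid because the ask side is larger
    print(calc_wap1(_book).iloc[0], calc_wap2(_book).iloc[0],
          calc_wap3(_book).iloc[0], calc_wap4(_book).iloc[0])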
def encode_mean(column: str, df: pd.DataFrame) -> float:
avg = df.groupby("time_id")[column].transform("mean")
return np.abs(df[column].sub(avg).div(avg))
# Function to calculate the log of the return
# Remember that logb(x / y) = logb(x) - logb(y)
def log_return(series: pd.DataFrame) -> np.ndarray:
return np.log(series).diff()
# Calculate the realized volatility
def realized_volatility(series: pd.DataFrame) -> np.ndarray:
return np.sqrt(np.sum(series ** 2))
# Function to count unique elements of a series
def count_unique(series: pd.DataFrame) -> np.ndarray:
return len(np.unique(series))
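# --- Worked example (illustrative only) -----------------------------------------------------
# Realized volatility is the square root of the summed squared log returns. The first element
# of the diff is NaN (np.sum on a pandas Series skips it, but it is dropped explicitly here).
if __name__ == "__main__":
    _prices = pd.Series([100.0, 100.5, 100.2, 100.8])
    _rv = realized_volatility(log_return(_prices).dropna())
    print(f"realized volatility of the toy price path: {_rv:.6f}")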
def is_high_realized_volatility(
train: pd.DataFrame, test: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
for i in tqdm(range(1, 5)):
train[f"log_return{i}_realized_volatility_is_high"] = train[
f"log_return{i}_realized_volatility"
].apply(lambda x: 0 if 0.0001 <= x <= 0.0003 else 1)
test[f"log_return{i}_realized_volatility_is_high"] = test[
f"log_return{i}_realized_volatility"
].apply(lambda x: 0 if 0.0001 <= x <= 0.0003 else 1)
return train, test
# Function to read our base train and test set
def read_train_test(path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
train = pd.read_csv(path + "train.csv")
test = pd.read_csv(path + "test.csv")
# Create a key to merge with book and trade data
train["row_id"] = train["stock_id"].astype(str) + "-" + train["time_id"].astype(str)
test["row_id"] = test["stock_id"].astype(str) + "-" + test["time_id"].astype(str)
print(f"Our training set has {train.shape[0]} rows")
return train, test
# Function to read our base train and test set
def read_test(path: str) -> pd.DataFrame:
test = pd.read_csv(path + "test.csv")
# Create a key to merge with book and trade data
test["row_id"] = test["stock_id"].astype(str) + "-" + test["time_id"].astype(str)
return test
# Function to preprocess book data (for each stock id)
def book_preprocessor(file_path: str):
df = pd.read_parquet(file_path)
# Calculate Wap
df["wap1"] = calc_wap1(df)
df["wap2"] = calc_wap2(df)
df["wap3"] = calc_wap3(df)
df["wap4"] = calc_wap4(df)
# Calculate log returns
df["log_return1"] = df.groupby(["time_id"])["wap1"].apply(log_return)
df["log_return2"] = df.groupby(["time_id"])["wap2"].apply(log_return)
df["log_return3"] = df.groupby(["time_id"])["wap3"].apply(log_return)
df["log_return4"] = df.groupby(["time_id"])["wap4"].apply(log_return)
# Calculate wap balance
df["wap_balance"] = abs(df["wap1"] - df["wap2"])
# Calculate spread
df["price_spread"] = (df["ask_price1"] - df["bid_price1"]) / (
(df["ask_price1"] + df["bid_price1"]) / 2
)
df["price_spread2"] = (df["ask_price2"] - df["bid_price2"]) / (
(df["ask_price2"] + df["bid_price2"]) / 2
)
df["bid_spread"] = df["bid_price1"] - df["bid_price2"]
df["ask_spread"] = df["ask_price1"] - df["ask_price2"]
df["bid_ask_spread"] = abs(df["bid_spread"] - df["ask_spread"])
df["total_volume"] = (df["ask_size1"] + df["ask_size2"]) + (
df["bid_size1"] + df["bid_size2"]
)
df["volume_imbalance"] = abs(
(df["ask_size1"] + df["ask_size2"]) - (df["bid_size1"] + df["bid_size2"])
)
# Dict for aggregations
create_feature_dict = {
"wap1": [np.sum, np.std],
"wap2": [np.sum, np.std],
"wap3": [np.sum, np.std],
"wap4": [np.sum, np.std],
"log_return1": [realized_volatility],
"log_return2": [realized_volatility],
"log_return3": [realized_volatility],
"log_return4": [realized_volatility],
"wap_balance": [np.sum, np.max],
"price_spread": [np.sum, np.max],
"price_spread2": [np.sum, np.max],
"bid_spread": [np.sum, np.max],
"ask_spread": [np.sum, np.max],
"total_volume": [np.sum, np.max],
"volume_imbalance": [np.sum, np.max],
"bid_ask_spread": [np.sum, np.max],
}
create_feature_dict_time = {
"log_return1": [realized_volatility],
"log_return2": [realized_volatility],
"log_return3": [realized_volatility],
"log_return4": [realized_volatility],
}
# Function to get group stats for different windows (seconds in bucket)
def get_stats_window(
fe_dict: Dict[str, List[Any]], seconds_in_bucket: int, add_suffix: bool = False
) -> pd.DataFrame:
# Group by the window
df_feature = (
df[df["seconds_in_bucket"] >= seconds_in_bucket]
.groupby(["time_id"])
.agg(fe_dict)
.reset_index()
)
# Rename columns joining suffix
df_feature.columns = ["_".join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix("_" + str(seconds_in_bucket))
return df_feature
# Get the stats for different windows
df_feature = get_stats_window(
create_feature_dict, seconds_in_bucket=0, add_suffix=False
)
df_feature_500 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=500, add_suffix=True
)
df_feature_400 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=400, add_suffix=True
)
df_feature_300 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=300, add_suffix=True
)
df_feature_200 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=200, add_suffix=True
)
df_feature_100 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=100, add_suffix=True
)
# Merge all
df_feature = df_feature.merge(
df_feature_500, how="left", left_on="time_id_", right_on="time_id__500"
)
df_feature = df_feature.merge(
df_feature_400, how="left", left_on="time_id_", right_on="time_id__400"
)
df_feature = df_feature.merge(
df_feature_300, how="left", left_on="time_id_", right_on="time_id__300"
)
df_feature = df_feature.merge(
df_feature_200, how="left", left_on="time_id_", right_on="time_id__200"
)
df_feature = df_feature.merge(
df_feature_100, how="left", left_on="time_id_", right_on="time_id__100"
)
    # Drop unnecessary time_id helper columns
df_feature.drop(
[
"time_id__500",
"time_id__400",
"time_id__300",
"time_id__200",
"time_id__100",
],
axis=1,
inplace=True,
)
# Create row_id so we can merge
stock_id = file_path.split("=")[1]
df_feature["row_id"] = df_feature["time_id_"].apply(lambda x: f"{stock_id}-{x}")
df_feature.drop(["time_id_"], axis=1, inplace=True)
return df_feature
# Function to preprocess trade data (for each stock id)
def trade_preprocessor(file_path: str) -> pd.DataFrame:
df = pd.read_parquet(file_path)
df["log_return"] = df.groupby("time_id")["price"].apply(log_return)
df["amount"] = df["price"] * df["size"]
# Dict for aggregations
create_feature_dict = {
"log_return": [realized_volatility],
"seconds_in_bucket": [count_unique],
"size": [np.sum, np.max, np.min],
"order_count": [np.sum, np.max],
"amount": [np.sum, np.max, np.min],
}
create_feature_dict_time = {
"log_return": [realized_volatility],
"seconds_in_bucket": [count_unique],
"size": [np.sum],
"order_count": [np.sum],
}
# Function to get group stats for different windows (seconds in bucket)
def get_stats_window(fe_dict, seconds_in_bucket, add_suffix=False):
# Group by the window
df_feature = (
df[df["seconds_in_bucket"] >= seconds_in_bucket]
.groupby(["time_id"])
.agg(fe_dict)
.reset_index()
)
# Rename columns joining suffix
df_feature.columns = ["_".join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix("_" + str(seconds_in_bucket))
return df_feature
# Get the stats for different windows
df_feature = get_stats_window(
create_feature_dict, seconds_in_bucket=0, add_suffix=False
)
df_feature_500 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=500, add_suffix=True
)
df_feature_400 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=400, add_suffix=True
)
df_feature_300 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=300, add_suffix=True
)
df_feature_200 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=200, add_suffix=True
)
df_feature_100 = get_stats_window(
create_feature_dict_time, seconds_in_bucket=100, add_suffix=True
)
def tendency(price: np.ndarray, vol: np.ndarray) -> float:
df_diff = np.diff(price)
val = (df_diff / price[1:]) * 100
power = np.sum(val * vol[1:])
return power
lis = []
for n_time_id in df["time_id"].unique():
df_id = df[df["time_id"] == n_time_id]
tendencyV = tendency(df_id["price"].values, df_id["size"].values)
f_max = np.sum(df_id["price"].values > np.mean(df_id["price"].values))
f_min = np.sum(df_id["price"].values < np.mean(df_id["price"].values))
df_max = np.sum(np.diff(df_id["price"].values) > 0)
df_min = np.sum(np.diff(df_id["price"].values) < 0)
# new
abs_diff = np.median(
np.abs(df_id["price"].values - np.mean(df_id["price"].values))
)
energy = np.mean(df_id["price"].values ** 2)
iqr_p = np.percentile(df_id["price"].values, 75) - np.percentile(
df_id["price"].values, 25
)
# vol vars
abs_diff_v = np.median(
np.abs(df_id["size"].values - np.mean(df_id["size"].values))
)
energy_v = np.sum(df_id["size"].values ** 2)
iqr_p_v = np.percentile(df_id["size"].values, 75) - np.percentile(
df_id["size"].values, 25
)
lis.append(
{
"time_id": n_time_id,
"tendency": tendencyV,
"f_max": f_max,
"f_min": f_min,
"df_max": df_max,
"df_min": df_min,
"abs_diff": abs_diff,
"energy": energy,
"iqr_p": iqr_p,
"abs_diff_v": abs_diff_v,
"energy_v": energy_v,
"iqr_p_v": iqr_p_v,
}
)
df_lr = pd.DataFrame(lis)
df_feature = df_feature.merge(
df_lr, how="left", left_on="time_id_", right_on="time_id"
)
# Merge all
df_feature = df_feature.merge(
df_feature_500, how="left", left_on="time_id_", right_on="time_id__500"
)
df_feature = df_feature.merge(
df_feature_400, how="left", left_on="time_id_", right_on="time_id__400"
)
df_feature = df_feature.merge(
df_feature_300, how="left", left_on="time_id_", right_on="time_id__300"
)
df_feature = df_feature.merge(
df_feature_200, how="left", left_on="time_id_", right_on="time_id__200"
)
df_feature = df_feature.merge(
df_feature_100, how="left", left_on="time_id_", right_on="time_id__100"
)
    # Drop unnecessary time_id helper columns
df_feature.drop(
[
"time_id__500",
"time_id__400",
"time_id__300",
"time_id__200",
"time_id",
"time_id__100",
],
axis=1,
inplace=True,
)
df_feature = df_feature.add_prefix("trade_")
stock_id = file_path.split("=")[1]
df_feature["row_id"] = df_feature["trade_time_id_"].apply(
lambda x: f"{stock_id}-{x}"
)
df_feature.drop(["trade_time_id_"], axis=1, inplace=True)
return df_feature
def encode_timeid(
train: pd.DataFrame, test: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
columns_to_encode = [
"wap1_sum",
"wap2_sum",
"wap3_sum",
"wap4_sum",
"log_return1_realized_volatility",
"log_return2_realized_volatility",
"log_return3_realized_volatility",
"log_return4_realized_volatility",
"wap_balance_sum",
"price_spread_sum",
"price_spread2_sum",
"bid_spread_sum",
"ask_spread_sum",
"total_volume_sum",
"volume_imbalance_sum",
"bid_ask_spread_sum",
"trade_log_return_realized_volatility",
"trade_seconds_in_bucket_count_unique",
"trade_size_sum",
"trade_order_count_sum",
"trade_amount_sum",
"trade_tendency",
"trade_f_max",
"trade_df_max",
"trade_abs_diff",
"trade_energy",
"trade_iqr_p",
"trade_abs_diff_v",
"trade_energy_v",
"trade_iqr_p_v",
]
df_aux = Parallel(n_jobs=-1, verbose=1)(
delayed(encode_mean)(column, train) for column in columns_to_encode
)
# Get group stats of time_id and stock_id
train = pd.concat(
[train] + [x.rename(x.name + "_timeid_encoded") for x in df_aux], axis=1
)
del df_aux
df_aux = Parallel(n_jobs=-1, verbose=1)(
delayed(encode_mean)(column, test) for column in columns_to_encode
)
# Get group stats of time_id and stock_id
test = pd.concat(
[test] + [x.rename(x.name + "_timeid_encoded") for x in df_aux], axis=1
)
del df_aux
return train, test
# Function to get group stats for the stock_id and time_id
def get_time_stock(df: pd.DataFrame) -> pd.DataFrame:
vol_cols = [
"log_return1_realized_volatility",
"log_return2_realized_volatility",
"log_return1_realized_volatility_400",
"log_return2_realized_volatility_400",
"log_return1_realized_volatility_300",
"log_return2_realized_volatility_300",
"log_return1_realized_volatility_200",
"log_return2_realized_volatility_200",
"trade_log_return_realized_volatility",
"trade_log_return_realized_volatility_400",
"trade_log_return_realized_volatility_300",
"trade_log_return_realized_volatility_200",
]
# Group by the stock id
df_stock_id = (
df.groupby(["stock_id"])[vol_cols]
.agg(
[
"mean",
"std",
"max",
"min",
]
)
.reset_index()
)
# Rename columns joining suffix
df_stock_id.columns = ["_".join(col) for col in df_stock_id.columns]
df_stock_id = df_stock_id.add_suffix("_" + "stock")
# Group by the stock id
df_time_id = (
df.groupby(["time_id"])[vol_cols]
.agg(
[
"mean",
"std",
"max",
"min",
]
)
.reset_index()
)
# Rename columns joining suffix
df_time_id.columns = ["_".join(col) for col in df_time_id.columns]
df_time_id = df_time_id.add_suffix("_" + "time")
# Merge with original dataframe
df = df.merge(
df_stock_id, how="left", left_on=["stock_id"], right_on=["stock_id__stock"]
)
df = df.merge(
df_time_id, how="left", left_on=["time_id"], right_on=["time_id__time"]
)
df.drop(["stock_id__stock", "time_id__time"], axis=1, inplace=True)
return df
# Function to run the per-stock preprocessing in parallel (for each stock id)
def preprocessor(list_stock_ids: np.ndarray, is_train: bool = True) -> pd.DataFrame:
    # Parallel for-loop body, executed once per stock id
def for_joblib(stock_id: int) -> pd.DataFrame:
# Train
if is_train:
file_path_book = data_dir + "book_train.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_train.parquet/stock_id=" + str(stock_id)
# Test
else:
file_path_book = data_dir + "book_test.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_test.parquet/stock_id=" + str(stock_id)
# Preprocess book and trade data and merge them
df_tmp = pd.merge(
book_preprocessor(file_path_book),
trade_preprocessor(file_path_trade),
on="row_id",
how="left",
)
# Return the merge dataframe
return df_tmp
    # Use the joblib Parallel API to run the per-stock loop in parallel
df = Parallel(n_jobs=-1, verbose=1)(
delayed(for_joblib)(stock_id) for stock_id in list_stock_ids
)
# Concatenate all the dataframes that return from Parallel
df = pd.concat(df, ignore_index=True)
return df
# replace by order sum (tau)
def add_tau_feature(
train: pd.DataFrame, test: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
train["size_tau"] = np.sqrt(1 / train["trade_seconds_in_bucket_count_unique"])
test["size_tau"] = np.sqrt(1 / test["trade_seconds_in_bucket_count_unique"])
# train['size_tau_450'] = np.sqrt( 1/ train['trade_seconds_in_bucket_count_unique_450'] )
# test['size_tau_450'] = np.sqrt( 1/ test['trade_seconds_in_bucket_count_unique_450'] )
train["size_tau_400"] = np.sqrt(
1 / train["trade_seconds_in_bucket_count_unique_400"]
)
test["size_tau_400"] = np.sqrt(1 / test["trade_seconds_in_bucket_count_unique_400"])
train["size_tau_300"] = np.sqrt(
1 / train["trade_seconds_in_bucket_count_unique_300"]
)
test["size_tau_300"] = np.sqrt(1 / test["trade_seconds_in_bucket_count_unique_300"])
# train['size_tau_150'] = np.sqrt( 1/ train['trade_seconds_in_bucket_count_unique_150'] )
# test['size_tau_150'] = np.sqrt( 1/ test['trade_seconds_in_bucket_count_unique_150'] )
train["size_tau_200"] = np.sqrt(
1 / train["trade_seconds_in_bucket_count_unique_200"]
)
test["size_tau_200"] = np.sqrt(1 / test["trade_seconds_in_bucket_count_unique_200"])
train["size_tau2"] = np.sqrt(1 / train["trade_order_count_sum"])
test["size_tau2"] = np.sqrt(1 / test["trade_order_count_sum"])
# train['size_tau2_450'] = np.sqrt( 0.25/ train['trade_order_count_sum'] )
# test['size_tau2_450'] = np.sqrt( 0.25/ test['trade_order_count_sum'] )
train["size_tau2_400"] = np.sqrt(0.33 / train["trade_order_count_sum"])
test["size_tau2_400"] = np.sqrt(0.33 / test["trade_order_count_sum"])
train["size_tau2_300"] = np.sqrt(0.5 / train["trade_order_count_sum"])
test["size_tau2_300"] = np.sqrt(0.5 / test["trade_order_count_sum"])
# train['size_tau2_150'] = np.sqrt( 0.75/ train['trade_order_count_sum'] )
# test['size_tau2_150'] = np.sqrt( 0.75/ test['trade_order_count_sum'] )
train["size_tau2_200"] = np.sqrt(0.66 / train["trade_order_count_sum"])
test["size_tau2_200"] = np.sqrt(0.66 / test["trade_order_count_sum"])
# delta tau
train["size_tau2_d"] = train["size_tau2_400"] - train["size_tau2"]
test["size_tau2_d"] = test["size_tau2_400"] - test["size_tau2"]
return train, test
def create_agg_features(
train: pd.DataFrame, test: pd.DataFrame, path: str
) -> Tuple[pd.DataFrame, pd.DataFrame]:
# Making agg features
train_p = pd.read_csv(path + "train.csv")
train_p = train_p.pivot(index="time_id", columns="stock_id", values="target")
corr = train_p.corr()
ids = corr.index
kmeans = KMeans(n_clusters=7, random_state=0).fit(corr.values)
indexes = [
[(x - 1) for x in ((ids + 1) * (kmeans.labels_ == n)) if x > 0]
for n in tqdm(range(7))
]
mat = []
mat_test = []
n = 0
for ind in tqdm(indexes):
new_df = train.loc[train["stock_id"].isin(ind)]
new_df = new_df.groupby(["time_id"]).agg(np.nanmean)
new_df.loc[:, "stock_id"] = str(n) + "c1"
mat.append(new_df)
new_df = test.loc[test["stock_id"].isin(ind)]
new_df = new_df.groupby(["time_id"]).agg(np.nanmean)
new_df.loc[:, "stock_id"] = str(n) + "c1"
mat_test.append(new_df)
n += 1
mat1 = pd.concat(mat).reset_index()
mat1.drop(columns=["target"], inplace=True)
mat2 = pd.concat(mat_test).reset_index()
mat2 = pd.concat([mat2, mat1.loc[mat1.time_id == 5]])
mat1 = mat1.pivot(index="time_id", columns="stock_id")
mat1.columns = ["_".join(x) for x in tqdm(mat1.columns.tolist())]
mat1.reset_index(inplace=True)
mat2 = mat2.pivot(index="time_id", columns="stock_id")
mat2.columns = ["_".join(x) for x in tqdm(mat2.columns.tolist())]
mat2.reset_index(inplace=True)
prefix = [
"log_return1_realized_volatility",
"total_volume_sum",
"trade_size_sum",
"trade_order_count_sum",
"price_spread_sum",
"bid_spread_sum",
"ask_spread_sum",
"volume_imbalance_sum",
"bid_ask_spread_sum",
"size_tau2",
]
selected_cols = mat1.filter(
regex="|".join(f"^{x}.(0|1|3|4|6)c1" for x in tqdm(prefix))
).columns.tolist()
selected_cols.append("time_id")
train_m = pd.merge(train, mat1[selected_cols], how="left", on="time_id")
test_m = pd.merge(test, mat2[selected_cols], how="left", on="time_id")
# filling missing values with train means
features = [
col
for col in train_m.columns.tolist()
if col not in ["time_id", "target", "row_id"]
]
train_m[features] = train_m[features].fillna(train_m[features].mean())
test_m[features] = test_m[features].fillna(train_m[features].mean())
return train_m, test_m
def network_agg_features(
train: pd.DataFrame, test: pd.DataFrame, path: str
) -> Tuple[pd.DataFrame, pd.DataFrame]:
# Making agg features
train_p = pd.read_csv(path + "train.csv")
train_p = train_p.pivot(index="time_id", columns="stock_id", values="target")
corr = train_p.corr()
ids = corr.index
kmeans = KMeans(n_clusters=7, random_state=0).fit(corr.values)
indexes = [
[(x - 1) for x in ((ids + 1) * (kmeans.labels_ == n)) if x > 0]
for n in tqdm(range(7))
]
mat = []
mat_test = []
n = 0
for ind in tqdm(indexes):
new_df = train.loc[train["stock_id"].isin(ind)]
new_df = new_df.groupby(["time_id"]).agg(np.nanmean)
new_df.loc[:, "stock_id"] = str(n) + "c1"
mat.append(new_df)
new_df = test.loc[test["stock_id"].isin(ind)]
new_df = new_df.groupby(["time_id"]).agg(np.nanmean)
new_df.loc[:, "stock_id"] = str(n) + "c1"
mat_test.append(new_df)
n += 1
mat1 = pd.concat(mat).reset_index()
mat1.drop(columns=["target"], inplace=True)
mat2 = pd.concat(mat_test).reset_index()
mat2 = pd.concat([mat2, mat1.loc[mat1.time_id == 5]])
mat1 = mat1.pivot(index="time_id", columns="stock_id")
mat1.columns = ["_".join(x) for x in tqdm(mat1.columns.tolist())]
mat1.reset_index(inplace=True)
mat2 = mat2.pivot(index="time_id", columns="stock_id")
mat2.columns = ["_".join(x) for x in tqdm(mat2.columns.tolist())]
mat2.reset_index(inplace=True)
prefix = [
"log_return1_realized_volatility",
"total_volume_mean",
"trade_size_mean",
"trade_order_count_mean",
"price_spread_mean",
"bid_spread_mean",
"ask_spread_mean",
"volume_imbalance_mean",
"bid_ask_spread_mean",
"size_tau2",
]
selected_cols = mat1.filter(
regex="|".join(f"^{x}.(0|1|3|4|6)c1" for x in prefix)
).columns.tolist()
selected_cols.append("time_id")
train_m = pd.merge(train, mat1[selected_cols], how="left", on="time_id")
    test_m = pd.merge(test, mat2[selected_cols], how="left", on="time_id")
    # Return both frames, mirroring create_agg_features (the original file is truncated here).
    return train_m, test_m
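# --- End-to-end driver sketch (assumption: not part of the original module) -----------------
# Mirrors how these helpers are typically chained: read the raw train/test tables, build the
# book/trade features per stock in parallel, then add the time/stock aggregates, tau features
# and the KMeans-cluster aggregates. Requires the competition data under `data_dir`.
if __name__ == "__main__":
    train, test = read_train_test(data_dir)
    df_train = preprocessor(train["stock_id"].unique(), is_train=True)
    df_test = preprocessor(test["stock_id"].unique(), is_train=False)
    train = train.merge(df_train, on="row_id", how="left")
    test = test.merge(df_test, on="row_id", how="left")
    train = get_time_stock(train)
    test = get_time_stock(test)
    train, test = add_tau_feature(train, test)
    train, test = create_agg_features(train, test, data_dir)
    print(train.shape, test.shape)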
from __future__ import division
import math
import sys
from random import randint
from random import random as rnd
from reoccuring_drift_stream import ReoccuringDriftStream
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.spatial.distance import cdist
from scipy.special import logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import validation
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from skmultiflow.drift_detection import KSWIN
from skmultiflow.data.mixed_generator import MIXEDGenerator
#Abrupt Concept Drift Generators
from skmultiflow.drift_detection.adwin import ADWIN
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
from bix.classifiers.rrslvq import RRSLVQ  # note: shadowed by the local RRSLVQ class defined below
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Created on Fri Jun 22 09:35:11 2018
@author: moritz
"""
# TODO: add sigma for every prototype (TODO from https://github.com/MrNuggelz/sklearn-lvq)
class RRSLVQ(ClassifierMixin, BaseEstimator):
"""Robust Soft Learning Vector Quantization
Parameters
----------
prototypes_per_class : int or list of int, optional (default=1)
Number of prototypes per class. Use list to specify different
numbers per class.
initial_prototypes : array-like, shape = [n_prototypes, n_features + 1],
optional
Prototypes to start with. If not given initialization near the class
means. Class label must be placed as last entry of each prototype.
sigma : float, optional (default=0.5)
Variance for the distribution.
max_iter : int, optional (default=2500)
The maximum number of iterations.
gtol : float, optional (default=1e-5)
Gradient norm must be less than gtol before successful termination
of bfgs.
display : boolean, optional (default=False)
print information about the bfgs steps.
random_state : int, RandomState instance or None, optional
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
gradient_descent : string, Gradient Descent describes the used technique
to perform the gradient descent. Possible values: 'SGD' (default),
and 'l-bfgs-b'.
drift_handling : string, Type of concept drift DETECTION.
None means no concept drift detection
If KS, use of Kolmogorov Smirnov test
If ADWIN, use of Adaptive Sliding Window dimension wise
IF DIST, monitoring class distances to detect outlier.
Attributes
----------
w_ : array-like, shape = [n_prototypes, n_features]
Prototype vector, where n_prototypes in the number of prototypes and
n_features is the number of features
c_w_ : array-like, shape = [n_prototypes]
Prototype classes
classes_ : array-like, shape = [n_classes]
Array containing labels.
initial_fit : boolean, indicator for initial fitting. Set to false after
first call of fit/partial fit.
"""
def __init__(self, prototypes_per_class=1, initial_prototypes=None,
sigma=1.0, max_iter=2500, gtol=1e-5,
display=False, random_state=None,drift_handling = "KS",confidence=0.05,replace = True):
self.sigma = sigma
self.confidence = confidence
self.random_state = random_state
self.initial_prototypes = initial_prototypes
self.prototypes_per_class = prototypes_per_class
self.display = display
self.max_iter = max_iter
self.gtol = gtol
self.initial_fit = True
self.max_class_distances = None
self.classes_ = []
self.counter = 0
self.cd_detects = []
self.drift_handling = drift_handling
self.drift_detected = False
self.replace = replace
self.init_drift_detection = True
self.some = []
self.bg_data = [[],[]]
if not isinstance(self.display, bool):
raise ValueError("display must be a boolean")
if not isinstance(self.max_iter, int) or self.max_iter < 1:
raise ValueError("max_iter must be an positive integer")
if not isinstance(self.gtol, float) or self.gtol <= 0:
raise ValueError("gtol must be a positive float")
def _optfun(self, variables, training_data, label_equals_prototype):
n_data, n_dim = training_data.shape
nb_prototypes = self.c_w_.size
prototypes = variables.reshape(nb_prototypes, n_dim)
out = 0
for i in range(n_data):
xi = training_data[i]
y = label_equals_prototype[i]
fs = [self._costf(xi, w) for w in prototypes]
fs_max = max(fs)
s1 = sum([np.math.exp(fs[i] - fs_max) for i in range(len(fs))
if self.c_w_[i] == y])
s2 = sum([np.math.exp(f - fs_max) for f in fs])
s1 += 0.0000001
s2 += 0.0000001
out += math.log(s1 / s2)
return -out
def _optimize(self, X, y, random_state):
"""Implementation of Stochastical Gradient Descent"""
n_data, n_dim = X.shape
nb_prototypes = self.c_w_.size
prototypes = self.w_.reshape(nb_prototypes, n_dim)
for i in range(n_data):
xi = X[i]
c_xi = y[i]
for j in range(prototypes.shape[0]):
d = (xi - prototypes[j])
c = 0.5
if self.c_w_[j] == c_xi:
# Attract prototype to data point
self.w_[j] += c * (self._p(j, xi, prototypes=self.w_, y=c_xi) -
self._p(j, xi, prototypes=self.w_)) * d
else:
# Distance prototype from data point
self.w_[j] -= c * self._p(j, xi, prototypes=self.w_) * d
def _costf(self, x, w, **kwargs):
d = (x - w)[np.newaxis].T
d = d.T.dot(d)
return -d / (2 * self.sigma)
def _p(self, j, e, y=None, prototypes=None, **kwargs):
if prototypes is None:
prototypes = self.w_
if y is None:
fs = [self._costf(e, w, **kwargs) for w in prototypes]
else:
fs = [self._costf(e, prototypes[i], **kwargs) for i in
range(prototypes.shape[0]) if
self.c_w_[i] == y]
fs_max = max(fs)
s = sum([np.math.exp(f - fs_max) for f in fs])
o = np.math.exp(
self._costf(e, prototypes[j], **kwargs) - fs_max) / s
return o
def get_prototypes(self):
"""Returns the prototypes"""
return self.w_
def predict(self, x):
"""Predict class membership index for each input sample.
This function does classification on an array of
test vectors X.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
check_is_fitted(self, ['w_', 'c_w_'])
x = validation.check_array(x)
if x.shape[1] != self.w_.shape[1]:
raise ValueError("X has wrong number of features\n"
"found=%d\n"
"expected=%d" % (self.w_.shape[1], x.shape[1]))
return np.array([self.c_w_[np.array([self._costf(xi,p) for p in self.w_]).argmax()] for xi in x])
def posterior(self, y, x):
"""
calculate the posterior for x:
p(y|x)
Parameters
----------
y: class
label
x: array-like, shape = [n_features]
sample
Returns
-------
posterior
:return: posterior
"""
check_is_fitted(self, ['w_', 'c_w_'])
x = validation.column_or_1d(x)
if y not in self.classes_:
raise ValueError('y must be one of the labels\n'
'y=%s\n'
'labels=%s' % (y, self.classes_))
s1 = sum([self._costf(x, self.w_[i]) for i in
range(self.w_.shape[0]) if
self.c_w_[i] == y])
s2 = sum([self._costf(x, w) for w in self.w_])
return s1 / s2
def get_info(self):
return 'RSLVQ'
def predict_proba(self, X):
""" predict_proba
Predicts the probability of each sample belonging to each one of the
known target_values.
Parameters
----------
X: Numpy.ndarray of shape (n_samples, n_features)
A matrix of the samples we want to predict.
Returns
-------
numpy.ndarray
An array of shape (n_samples, n_features), in which each outer entry is
associated with the X entry of the same index. And where the list in
index [i] contains len(self.target_values) elements, each of which represents
the probability that the i-th sample of X belongs to a certain label.
"""
return 'Not implemented'
def reset(self):
self.__init__()
def _validate_train_parms(self, train_set, train_lab, classes=None):
random_state = validation.check_random_state(self.random_state)
train_set, train_lab = validation.check_X_y(train_set, train_lab)
if(self.initial_fit):
if(classes):
self.classes_ = np.asarray(classes)
self.protos_initialized = np.zeros(self.classes_.size)
else:
self.classes_ = unique_labels(train_lab)
self.protos_initialized = np.zeros(self.classes_.size)
nb_classes = len(self.classes_)
nb_samples, nb_features = train_set.shape # nb_samples unused
# set prototypes per class
if isinstance(self.prototypes_per_class, int):
if self.prototypes_per_class < 0 or not isinstance(
self.prototypes_per_class, int):
raise ValueError("prototypes_per_class must be a positive int")
# nb_ppc = number of protos per class
nb_ppc = np.ones([nb_classes],
dtype='int') * self.prototypes_per_class
else:
nb_ppc = validation.column_or_1d(
validation.check_array(self.prototypes_per_class,
ensure_2d=False, dtype='int'))
if nb_ppc.min() <= 0:
raise ValueError(
"values in prototypes_per_class must be positive")
if nb_ppc.size != nb_classes:
raise ValueError(
"length of prototypes per class"
" does not fit the number of classes"
"classes=%d"
"length=%d" % (nb_classes, nb_ppc.size))
# initialize prototypes
if self.initial_prototypes is None:
#self.w_ = np.repeat(np.array([self.geometric_median(train_set[train_lab == l],"minimize") for l in self.classes_]),nb_ppc,axis=0)
#self.c_w_ = np.repeat(self.classes_,nb_ppc)
if self.initial_fit:
self.w_ = np.empty([np.sum(nb_ppc), nb_features], dtype=np.double)
self.c_w_ = np.empty([nb_ppc.sum()], dtype=self.classes_.dtype)
pos = 0
for actClass in range(len(self.classes_)):
nb_prot = nb_ppc[actClass] # nb_ppc: prototypes per class
                # compare the class label itself (not its index) with the labels present in this batch
                if (self.protos_initialized[actClass] == 0
                        and self.classes_[actClass] in unique_labels(train_lab)):
mean = np.mean(
train_set[train_lab == self.classes_[actClass], :], 0)
self.w_[pos:pos + nb_prot] = mean + (
random_state.rand(nb_prot, nb_features) * 2 - 1)
if math.isnan(self.w_[pos, 0]):
print('null: ', actClass)
self.protos_initialized[actClass] = 0
else:
self.protos_initialized[actClass] = 1
#
self.c_w_[pos:pos + nb_prot] = self.classes_[actClass]
pos += nb_prot
else:
x = validation.check_array(self.initial_prototypes)
self.w_ = x[:, :-1]
self.c_w_ = x[:, -1]
if self.w_.shape != (np.sum(nb_ppc), nb_features):
raise ValueError("the initial prototypes have wrong shape\n"
"found=(%d,%d)\n"
"expected=(%d,%d)" % (
self.w_.shape[0], self.w_.shape[1],
nb_ppc.sum(), nb_features))
if set(self.c_w_) != set(self.classes_):
raise ValueError(
"prototype labels and test data classes do not match\n"
"classes={}\n"
"prototype labels={}\n".format(self.classes_, self.c_w_))
if self.initial_fit:
self.initial_fit = False
return train_set, train_lab, random_state
def fit(self, X, y, classes=None):
"""Fit the LVQ model to the given training data and parameters using
l-bfgs-b.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
Returns
--------
self
"""
X, y, random_state = self._validate_train_parms(X, y, classes=classes)
if len(np.unique(y)) == 1:
raise ValueError("fitting " + type(
self).__name__ + " with only one class is not possible")
self._optimize(X, y, random_state)
return self
def partial_fit(self, X, y, classes=None):
"""Fit the LVQ model to the given training data and parameters using
l-bfgs-b.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
Returns
--------
self
"""
        if set(unique_labels(y)).issubset(set(self.classes_)) or self.initial_fit:
X, y, random_state = self._validate_train_parms(
X, y, classes=classes)
else:
raise ValueError('Class {} was not learned - please declare all \
classes in first call of fit/partial_fit'.format(y))
self.counter = self.counter + 1
if self.drift_handling is not None and self.concept_drift_detection(X,y):
self.cd_handling(X,y)
if self.counter > 30:
self.save_data(X,y,random_state)
self.cd_detects.append(self.counter)
print(self.w_.shape)
self._optimize(X, y, random_state)
return self
    def save_data(self, X, y, random_state):
        # Persist the current prototypes and their class labels for inspection
        # (the label filename is assumed; the original line only built the DataFrame).
        pd.DataFrame(self.w_).to_csv("Prototypes.csv")
        pd.DataFrame(self.c_w_).to_csv("PrototypeLabels.csv")
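# --- Usage sketch (assumption: not part of the original file) --------------------------------
# The classifier follows the scikit-multiflow partial_fit protocol. Drift handling is disabled
# here because the drift-detection helpers (concept_drift_detection / cd_handling) live further
# down in the original file, which is truncated above.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0.0, 1.0, (200, 2)), rng.normal(4.0, 1.0, (200, 2))])
    y = np.array([0] * 200 + [1] * 200)
    order = rng.permutation(len(y))
    X, y = X[order], y[order]
    clf = RRSLVQ(prototypes_per_class=2, sigma=1.0, drift_handling=None)
    clf.partial_fit(X[:100], y[:100], classes=[0, 1])  # first call must declare all classes
    for start in range(100, 400, 50):                  # feed the rest as a stream of mini-batches
        clf.partial_fit(X[start:start + 50], y[start:start + 50])
    print("training accuracy:", (clf.predict(X) == y).mean())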
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import string
from matplotlib.dates import MonthLocator, DayLocator, WeekdayLocator, MO, TU, WE, TH, FR, SA, SU
def plot_rca_timeseries_oneradar(
rca_file, output_directory, baseline_date, polarization, scan_type, site, inst, start_date, end_date
):
"""
plot_rca_timeseries_oneradar
Parameters
----------
rca_file: str
path to RCA CSV file
output_directory: str
path to directory for output .png file(s)
baseline_date: str
YYYY-MM-DD format of baseline date in this dataset
polarization: str
specify the polarization(s) desired
'horizontal'
'dual'
scan_type: str
specify if the map is for PPI or RHI
'ppi'
'rhi'
site: str
site abbreviation
inst: str
instrument name
start_date: str
Start date of plot, form YYYY-MM-DD
end_date: str
End date of plot, form YYYY-MM-DD
"""
###############################################################
# Plotting rc parameters
# xtick
plt.rc('xtick', color='k', labelsize=10, direction='out')
plt.rc('xtick.major', size=4, pad=4)
plt.rc('xtick.minor', size=2, pad=4)
# ytick
plt.rc('ytick', color='k', labelsize=10, direction='in')
plt.rc('ytick.major', size=4, pad=4)
plt.rc('ytick.minor', size=2, pad=4)
# figure
plt.rc('figure', titlesize=12, figsize=[8,4], dpi=500, autolayout=False)
# legend
plt.rc('legend', loc='best')
# lines
plt.rc('lines', linewidth=0.5, linestyle='-', marker='o', markersize=3.0)
# font
    plt.rc('font', family='sans-serif', style='normal')  # 'sans' is not a valid generic font family
# text
plt.rc('mathtext', fontset='dejavusans')
# axes
plt.rc('axes', facecolor='white', linewidth=0.8, grid=True, titlesize=14, labelsize=12)
plt.rc('axes.grid', axis='both', which='both')
# dates
#plt.rc('date.autoformatter', day='%Y-%m-%d')
###############################################################
# Convert string dates to datetime for plotting
baseline_date = pd.to_datetime(baseline_date, format='%Y-%m-%d')
    start_date = pd.to_datetime(start_date, format='%Y-%m-%d')
from typing import Tuple, Union, Optional
import os
import pytest
from PIL import Image
from pathlib import Path
import scanpy as sc
import scvelo as scv
import cellrank as cr
from anndata import AnnData
from cellrank.tl.kernels import VelocityKernel, PrecomputedKernel, ConnectivityKernel
import numpy as np
import pandas as pd
from sklearn.svm import SVR
from scipy.sparse import spdiags, issparse, csr_matrix
from pandas.testing import assert_frame_equal, assert_series_equal
def _jax_not_installed() -> bool:
try:
import jax
import jaxlib
return False
except ImportError:
return True
def _rpy2_mgcv_not_installed() -> bool:
try:
import rpy2
from packaging import version
from rpy2.robjects.packages import PackageNotInstalledError, importr
try:
from importlib_metadata import version as get_version
except ImportError:
# >=Python3.8
from importlib.metadata import version as get_version
try:
assert version.parse(get_version(rpy2.__name__)) >= version.parse("3.3.0")
_ = importr("mgcv")
return False
except (PackageNotInstalledError, AssertionError):
pass
except ImportError:
pass
return True
def bias_knn(
conn: csr_matrix,
pseudotime: np.ndarray,
n_neighbors: int,
k: int = 3,
frac_to_keep: Optional[float] = None,
) -> csr_matrix:
# frac_to_keep=None mimics original impl. (which mimics Palantir)
k_thresh = max(0, min(int(np.floor(n_neighbors / k)) - 1, 30))
conn_biased = conn.copy()
# check whether the original graph was connected
assert _is_connected(conn), "The underlying KNN graph is disconnected."
for i in range(conn.shape[0]):
# get indices, values and current pseudo t
row_data = conn[i, :].data
row_ixs = conn[i, :].indices
current_t = pseudotime[i]
if frac_to_keep is not None:
k_thresh = max(0, min(30, int(np.floor(len(row_data) * frac_to_keep))))
# get the 'candidates' - ixs of nodes not in the k_thresh closest neighbors
p = np.flip(np.argsort(row_data))
sorted_ixs = row_ixs[p]
cand_ixs = sorted_ixs[k_thresh:]
# compare pseudotimes and set indices to zero
cand_t = pseudotime[cand_ixs]
rem_ixs = cand_ixs[cand_t < current_t]
conn_biased[i, rem_ixs] = 0
conn_biased.eliminate_zeros()
# check whether the biased graph is still connected
assert _is_connected(conn_biased), "The biased KNN graph has become disconnected."
return conn_biased
def density_normalization(velo_graph, trans_graph):
# function copied from scanpy
q = np.asarray(trans_graph.sum(axis=0))
if not issparse(trans_graph):
Q = np.diag(1.0 / q)
else:
Q = spdiags(1.0 / q, 0, trans_graph.shape[0], trans_graph.shape[0])
velo_graph = Q @ velo_graph @ Q
return velo_graph
def _is_connected(c) -> bool:
import networkx as nx
from scipy.sparse import issparse
G = nx.from_scipy_sparse_matrix(c) if issparse(c) else nx.from_numpy_array(c)
return nx.is_connected(G)
def create_kernels(
adata: AnnData,
velocity_variances: Optional[str] = None,
connectivity_variances: Optional[str] = None,
) -> Tuple[VelocityKernel, ConnectivityKernel]:
vk = VelocityKernel(adata)
vk._mat_scaler = adata.obsp.get(
velocity_variances, np.random.normal(size=(adata.n_obs, adata.n_obs))
)
ck = ConnectivityKernel(adata)
ck._mat_scaler = adata.obsp.get(
connectivity_variances, np.random.normal(size=(adata.n_obs, adata.n_obs))
)
vk._transition_matrix = csr_matrix(np.eye(adata.n_obs))
ck._transition_matrix = np.eye(adata.n_obs, k=1) / 2 + np.eye(adata.n_obs) / 2
ck._transition_matrix[-1, -1] = 1
ck._transition_matrix = csr_matrix(ck._transition_matrix)
np.testing.assert_allclose(
np.sum(ck._transition_matrix.A, axis=1), 1
) # sanity check
return vk, ck
# TODO: make it a fixture
def create_model(adata: AnnData) -> cr.ul.models.SKLearnModel:
return cr.ul.models.SKLearnModel(adata, SVR(kernel="rbf"))
# TODO: make it a fixture
def create_failed_model(adata: AnnData) -> cr.ul.models.FailedModel:
return cr.ul.models.FailedModel(create_model(adata), exc="foobar")
def resize_images_to_same_sizes(
expected_image_path: Union[str, Path],
actual_image_path: Union[str, Path],
kind: str = "actual_to_expected",
) -> None:
if not os.path.isfile(actual_image_path):
raise OSError(f"Actual image path `{actual_image_path!r}` does not exist.")
if not os.path.isfile(expected_image_path):
raise OSError(f"Expected image path `{expected_image_path!r}` does not exist.")
expected_image = Image.open(expected_image_path)
actual_image = Image.open(actual_image_path)
if expected_image.size != actual_image.size:
if kind == "actual_to_expected":
actual_image.resize(expected_image.size).save(actual_image_path)
elif kind == "expected_to_actual":
expected_image.resize(actual_image.size).save(expected_image)
else:
raise ValueError(
f"Invalid kind of conversion `{kind!r}`."
f"Valid options are `'actual_to_expected'`, `'expected_to_actual'`."
)
def assert_array_nan_equal(
actual: Union[np.ndarray, pd.Series], expected: Union[np.ndarray, pd.Series]
) -> None:
"""
Test is 2 arrays or :class:`pandas.Series` are equal.
Params
------
actual
The actual data.
expected
The expected result.
Returns
-------
None
Nothing, but raises an exception if arrays are not equal, including the locations of NaN values.
"""
mask1 = ~(pd.isnull(actual) if isinstance(actual, pd.Series) else np.isnan(actual))
    mask2 = ~(
        pd.isnull(expected) if isinstance(expected, pd.Series) else np.isnan(expected)
    )
    # The original file is truncated here; per the docstring, NaN locations and the
    # remaining values are both compared.
    np.testing.assert_array_equal(np.where(mask1), np.where(mask2))
    np.testing.assert_array_equal(np.asarray(actual)[np.asarray(mask1)],
                                  np.asarray(expected)[np.asarray(mask2)])
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
pytest.importorskip('flask.ext.cors')
from base64 import b64encode
from copy import copy
import datashape
from datashape.util.testing import assert_dshape_equal
import numpy as np
from odo import odo, convert
from datetime import datetime
import pandas as pd
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from toolz import pipe
from blaze.dispatch import dispatch
from blaze.expr import Expr
from blaze.utils import example
from blaze import discover, symbol, by, CSV, compute, join, into, data
from blaze.server.client import mimetype
from blaze.server.server import Server, to_tree, from_tree, RC
from blaze.server.serialization import all_formats, trusted_formats, fastmsgpack
accounts = DataFrame([['Alice', 100], ['Bob', 200]],
columns=['name', 'amount'])
cities = DataFrame([['Alice', 'NYC'], ['Bob', 'LA']],
columns=['name', 'city'])
events = DataFrame([[1, datetime(2000, 1, 1, 12, 0, 0)],
[2, datetime(2000, 1, 2, 12, 0, 0)]],
columns=['value', 'when'])
db = data('sqlite:///' + example('iris.db'))
class DumbResource(object):
df = DataFrame({'a': np.arange(5),
'b': np.arange(5, 10)})
class NoResource(Exception):
pass
@convert.register(DataFrame, DumbResource)
def dumb_to_df(d, return_df=None, **kwargs):
if return_df is None:
raise DumbResource.NoResource('return_df must be passed')
to_return = odo(return_df, DataFrame, dshape=discover(d))
assert_frame_equal(to_return, DumbResource.df)
return to_return
@dispatch(Expr, DumbResource)
def compute_down(expr, d, **kwargs):
return dumb_to_df(d, **kwargs)
@discover.register(DumbResource)
def _discover_dumb(d):
return discover(DumbResource.df)
tdata = {'accounts': accounts,
'cities': cities,
'events': events,
'db': db,
'dumb': DumbResource()}
@pytest.fixture(scope='module')
def server():
s = Server(tdata, all_formats)
s.app.testing = True
return s
@pytest.fixture(scope='module')
def add_server():
s = Server(tdata, all_formats, allow_add=True)
s.app.testing = True
return s
@pytest.yield_fixture(params=[None, tdata])
def temp_server(request):
"""For when we want to mutate the server"""
data = request.param
s = Server(copy(data), formats=all_formats)
s.app.testing = True
with s.app.test_client() as c:
yield c
@pytest.yield_fixture(params=[None, tdata])
def temp_add_server(request):
"""For when we want to mutate the server, and also add datasets to it."""
data = request.param
s = Server(copy(data), formats=all_formats, allow_add=True)
s.app.testing = True
with s.app.test_client() as c:
yield c
@pytest.yield_fixture
def test(server):
with server.app.test_client() as c:
yield c
@pytest.yield_fixture
def test_add(add_server):
with add_server.app.test_client() as c:
yield c
@pytest.yield_fixture
def iris_server():
iris = CSV(example('iris.csv'))
s = Server(iris, all_formats, allow_add=True)
s.app.testing = True
with s.app.test_client() as c:
yield c
def test_datasets(test):
response = test.get('/datashape')
assert_dshape_equal(datashape.dshape(response.data.decode('utf-8')),
datashape.dshape(discover(tdata)))
@pytest.mark.parametrize('serial', all_formats)
def test_bad_responses(test, serial):
post = test.post('/compute/accounts.{name}'.format(name=serial.name),
data=serial.dumps(500),)
assert 'OK' not in post.status
post = test.post('/compute/non-existent-table.{name}'.format(name=serial.name),
data=serial.dumps(0))
assert 'OK' not in post.status
post = test.post('/compute/accounts.{name}'.format(name=serial.name))
assert 'OK' not in post.status
def test_to_from_json():
t = symbol('t', 'var * {name: string, amount: int}')
assert from_tree(to_tree(t)).isidentical(t)
assert from_tree(to_tree(t.amount + 1)).isidentical(t.amount + 1)
def test_to_tree():
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t.amount.sum()
dshape = datashape.dshape('var * {name: string, amount: int32}',)
sum_args = [{'op': 'Field',
'args': [{'op': 'Symbol',
'args': ['t', dshape, 0]},
'amount']},
[0],
False]
expected = {'op': 'sum', 'args': sum_args}
assert to_tree(expr) == expected
@pytest.mark.parametrize('serial', all_formats)
def test_to_tree_slice(serial):
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t[:5]
expr2 = pipe(expr, to_tree, serial.dumps, serial.loads, from_tree)
assert expr.isidentical(expr2)
def test_to_from_tree_namespace():
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t.name
tree = to_tree(expr, names={t: 't'})
assert tree == {'op': 'Field', 'args': ['t', 'name']}
new = from_tree(tree, namespace={'t': t})
assert new.isidentical(expr)
def test_from_tree_is_robust_to_unnecessary_namespace():
t = symbol('t', 'var * {name: string, amount: int32}')
expr = t.amount + 1
tree = to_tree(expr) # don't use namespace
assert from_tree(tree, {'t': t}).isidentical(expr)
t = symbol('t', discover(tdata))
@pytest.mark.parametrize('serial', all_formats)
def test_compute(test, serial):
expr = t.accounts.amount.sum()
query = {'expr': to_tree(expr)}
expected = 300
response = test.post('/compute',
data=serial.dumps(query),
headers=mimetype(serial))
assert 'OK' in response.status
tdata = serial.loads(response.data)
assert serial.data_loads(tdata['data']) == expected
assert list(tdata['names']) == ['amount_sum']
@pytest.mark.parametrize('serial', all_formats)
def test_get_datetimes(test, serial):
expr = t.events
query = {'expr': to_tree(expr)}
response = test.post('/compute',
data=serial.dumps(query),
headers=mimetype(serial))
assert 'OK' in response.status
tdata = serial.loads(response.data)
ds = datashape.dshape(tdata['datashape'])
result = into(np.ndarray,
serial.data_loads(tdata['data']),
dshape=ds)
assert into(list, result) == into(list, events)
assert list(tdata['names']) == events.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def dont_test_compute_with_namespace(test, serial):
query = {'expr': {'op': 'Field',
'args': ['accounts', 'name']}}
expected = ['Alice', 'Bob']
response = test.post('/compute',
data=serial.dumps(query),
headers=mimetype(serial))
assert 'OK' in response.status
tdata = serial.loads(response.data)
assert serial.data_loads(tdata['data']) == expected
assert tdata['names'] == ['name']
iris = CSV(example('iris.csv'))
@pytest.mark.parametrize('serial', all_formats)
def test_compute_with_variable_in_namespace(iris_server, serial):
test = iris_server
t = symbol('t', discover(iris))
pl = symbol('pl', 'float32')
expr = t[t.petal_length > pl].species
tree = to_tree(expr, {pl: 'pl'})
blob = serial.dumps({'expr': tree, 'namespace': {'pl': 5}})
resp = test.post('/compute',
data=blob,
headers=mimetype(serial))
assert 'OK' in resp.status
tdata = serial.loads(resp.data)
result = serial.data_loads(tdata['data'])
expected = list(compute(expr._subs({pl: 5}), {t: iris}))
assert odo(result, list) == expected
assert list(tdata['names']) == ['species']
@pytest.mark.parametrize('serial', all_formats)
def test_compute_by_with_summary(iris_server, serial):
test = iris_server
t = symbol('t', discover(iris))
expr = by(t.species,
max=t.petal_length.max(),
sum=t.petal_width.sum())
tree = to_tree(expr)
blob = serial.dumps({'expr': tree})
resp = test.post('/compute',
data=blob,
headers=mimetype(serial))
assert 'OK' in resp.status
tdata = serial.loads(resp.data)
result = DataFrame(serial.data_loads(tdata['data'])).values
expected = compute(expr, iris).values
np.testing.assert_array_equal(result[:, 0],
expected[:, 0])
np.testing.assert_array_almost_equal(result[:, 1:],
expected[:, 1:])
assert list(tdata['names']) == ['species', 'max', 'sum']
@pytest.mark.parametrize('serial', all_formats)
def test_compute_column_wise(iris_server, serial):
test = iris_server
t = symbol('t', discover(iris))
subexpr = ((t.petal_width / 2 > 0.5) &
(t.petal_length / 2 > 0.5))
expr = t[subexpr]
tree = to_tree(expr)
blob = serial.dumps({'expr': tree})
resp = test.post('/compute',
data=blob,
headers=mimetype(serial))
assert 'OK' in resp.status
tdata = serial.loads(resp.data)
result = serial.data_loads(tdata['data'])
expected = compute(expr, iris)
assert list(map(tuple, into(list, result))) == into(list, expected)
assert list(tdata['names']) == t.fields
@pytest.mark.parametrize('serial', all_formats)
def test_multi_expression_compute(test, serial):
s = symbol('s', discover(tdata))
expr = join(s.accounts, s.cities)
resp = test.post('/compute',
data=serial.dumps({'expr': to_tree(expr)}),
headers=mimetype(serial))
assert 'OK' in resp.status
respdata = serial.loads(resp.data)
result = serial.data_loads(respdata['data'])
expected = compute(expr, {s: tdata})
assert list(map(tuple, odo(result, list))) == into(list, expected)
assert list(respdata['names']) == expr.fields
@pytest.mark.parametrize('serial', all_formats)
def test_leaf_symbol(test, serial):
query = {'expr': {'op': 'Field', 'args': [':leaf', 'cities']}}
resp = test.post('/compute',
data=serial.dumps(query),
headers=mimetype(serial))
tdata = serial.loads(resp.data)
a = serial.data_loads(tdata['data'])
b = into(list, cities)
assert list(map(tuple, into(list, a))) == b
assert list(tdata['names']) == cities.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def test_sqlalchemy_result(test, serial):
expr = t.db.iris.head(5)
query = {'expr': to_tree(expr)}
response = test.post('/compute',
data=serial.dumps(query),
headers=mimetype(serial))
assert 'OK' in response.status
tdata = serial.loads(response.data)
result = serial.data_loads(tdata['data'])
if isinstance(result, list):
assert all(isinstance(item, (tuple, list)) for item in result)
elif isinstance(result, DataFrame):
expected = DataFrame([[5.1, 3.5, 1.4, 0.2, 'Iris-setosa'],
[4.9, 3.0, 1.4, 0.2, 'Iris-setosa'],
[4.7, 3.2, 1.3, 0.2, 'Iris-setosa'],
[4.6, 3.1, 1.5, 0.2, 'Iris-setosa'],
[5.0, 3.6, 1.4, 0.2, 'Iris-setosa']],
columns=['sepal_length',
'sepal_width',
'petal_length',
'petal_width',
'species'])
assert_frame_equal(expected, result)
assert list(tdata['names']) == t.db.iris.fields
def test_server_accepts_non_nonzero_ables():
Server(DataFrame())
def serialize_query_with_map_builtin_function(test, serial, fcn):
"""
serialize a query that invokes the 'map' operation using a builtin function
return the result of the post operation along with expected result
"""
t = symbol('t', discover(iris))
expr = t.species.map(fcn, 'int')
query = {'expr': to_tree(expr)}
response = test.post('/compute',
data=serial.dumps(query),
headers=mimetype(serial))
assert 'OK' in response.status
respdata = serial.loads(response.data)
result = serial.data_loads(respdata['data'])
exp_res = compute(expr, {t: iris}, return_type=list)
return (exp_res, result)
@pytest.mark.parametrize('serial', trusted_formats)
def test_map_builtin_client_server(iris_server, serial):
exp_res, result = serialize_query_with_map_builtin_function(iris_server,
serial,
len)
# Pass through Series() to canonicalize results.
assert (pd.Series(result) == pd.Series(exp_res)).all()
@pytest.mark.parametrize('serial', trusted_formats)
def test_map_numpy_client_server(iris_server, serial):
exp_res, result = serialize_query_with_map_builtin_function(iris_server,
serial,
np.size)
# Pass through Series() to canonicalize results.
    assert (pd.Series(result) == pd.Series(exp_res)).all()
import requests
import time
import json
import os
from tqdm import tqdm
import pandas as pd
def searchPlace():
'''Get all places' name and vicinity around
GPS location point from list of location point.
List of location point consists of strings
in form [latitude,longitude]. (without square brackets)
From each point, Google Places API gets
all places info around 70 meters, which ranges circle.
Current location file almost covers
area of 부산광역시 금정구 장전동.
latitude-890 makes area circle move down,
longitude+1123 makes area circle move right.
'''
filename = "./location"
f = open(filename, "r")
locationList = []
key = os.environ['GOOGLE_PLACES_KEY']
for location in tqdm(f.readlines()):
locationString = location.strip()
URL = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {'key': key, 'location': locationString,
'radius': 70, 'language': 'ko'}
resp = requests.get(URL, params=params)
jsonObject = json.loads(resp.text)
responseResult = jsonObject.get('results')
nextpageToken = jsonObject.get('next_page_token')
for loc in responseResult:
locationList.append([loc["vicinity"], loc["name"]])
while nextpageToken is not None:
time.sleep(2)
params = {'key': key, 'pagetoken': nextpageToken, 'language': 'ko'}
resp = requests.get(URL, params=params)
jsonObject = json.loads(resp.text)
responseResult = jsonObject.get('results')
nextpageToken = jsonObject.get('next_page_token')
for loc in responseResult:
locationList.append([loc["vicinity"], loc["name"]])
    df = pd.DataFrame(locationList, columns=['vicinity', 'name'])
    # The original file is truncated here; returning the collected places is assumed.
    return df
#!/usr/bin/env python
# coding: utf-8
import tweepy
import tqdm
import csv
import json
import time
from tqdm import tqdm_notebook as tqdm
def makeAuthConnection():
consumerApiKey = 'XXXXXXX'
consumerApiSecret = 'XXXXXXX'
acessToken = 'XXXXXX'
acessTokenSecret = 'XXXXXX'
auth = tweepy.OAuthHandler(consumerApiKey, consumerApiSecret)
#auth = tweepy.AppAuthHandler(consumerApiKey, consumerApiSecret)
auth.set_access_token(acessToken, acessTokenSecret)
return tweepy.API(auth , wait_on_rate_limit = True,wait_on_rate_limit_notify = True)
# In[3]:
api = makeAuthConnection()
# for status in tweepy.Cursor(api.search, q='tweepy').items(10):
# print(status.text)
# In[4]:
def checkRemainingSearchCount():
jsonString = api.rate_limit_status()['resources']['search']['/search/tweets']
upperLimit = jsonString['limit']
remiaingFetch = jsonString['remaining']
#resetTime = jsonString['reset']/60000
print (jsonString)
return upperLimit,remiaingFetch
# In[5]:
checkRemainingSearchCount()
# This method writes the fetched tweets for each hashtag to a CSV file.
# It uses the tweepy search API with max_id-based pagination.
# Note: the original TODO reported that the oldest tweet of each page was fetched twice;
# the pagination below now decrements max_id past the oldest id to avoid the duplicate.
def searchTweetsByHashtag(searchlist):
# use this filter to filter the tweets based on the key words -filter:retweets AND -filter:replies
searchFilter = ' AND -filter:links and -filter:videos and -filter:retweets'
fileName = 'tweetDataset.csv'
with open (fileName,'a', newline='',encoding='utf-8') as sampleFile:
writer = csv.writer(sampleFile,quoting = csv.QUOTE_NONNUMERIC)
try:
for searchString in searchlist:
search_result = api.search(q=searchString + searchFilter,count=1,lang="en",tweet_mode='extended'
, result_type = 'recent')
if(len(search_result) == 0):
print("*************No data on "+ searchString +" hashtag.***************")
else :
max_id = search_result[0].id
#print("max_id",max_id)
old_id = -1
i = 1
while(max_id != old_id):
old_id = max_id
tweetDic = tweepy.Cursor(api.search,q = searchString + searchFilter ,lang = 'en'
,include_entities=False,tweet_mode='extended',count = 100
,max_id = max_id).items(300)
print("loop count",i)
for tweets in tweetDic:
jsonString = tweets._json
#print(jsonString['id'],jsonString['full_text'].replace('\n', ' '))
csv_row = [jsonString['id'],jsonString['user']['screen_name'],jsonString['retweet_count']
,jsonString['full_text'].replace('\n', ' ')]
# we can also encode the text here to remove emojies from the text.
max_id = jsonString['id'] + 1
writer.writerow(csv_row)
print("Going to sleep to keep limit to check")
time.sleep(3)
print("Waking Up")
print("*************No more data to exact.*************")
except tweepy.TweepError as e:
print("Some error!!:"+str(e))
# In[8]:
search_criteria = ['#MotichoorChaknachoorReview','#jhalkireview','#FordVsFerrari','#MotherlessBrooklyn'
,'#Charlie\'sAngels','#DoctorSleepReview','#MidwayMovie','#Actionreview','#SangathamizhanReview'
,'#JhalleReview']
searchTweetsByHashtag(search_criteria)
# second file
#!/usr/bin/env python
# coding: utf-8
# In[46]:
import numpy as np
import pandas as pd
import nltk
import matplotlib.pyplot as plt
import seaborn as sea
import copy
import emoji
import time as time
from nltk.tokenize import TweetTokenizer
from nltk.corpus import sentiwordnet as swm
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from wordcloud import WordCloud, STOPWORDS
from textblob import TextBlob
from afinn import Afinn
from statistics import mode
# In[47]:
data = pd.read_csv('InitialData.csv',header = None)
data = data.iloc[:,3]
# In[3]:
print(data.shape)
# # Data Preprocessing
# ### Removing handle name and hashtags
# In[4]:
def dataCleaning(data):
# regexes for handles, for RT and for URLs
regexes = ['@[A-Z0-9a-z_:]+','^[RT]+','https?://[A-Za-z0-9./]+','(#\w+)','[!,)(.:*“”""+_’\'?\-]+']
for regex in regexes:
data = data.replace(to_replace =regex, value = '', regex = True)
data = data.str.strip()
data = data.str.lower()
return data
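# Illustrative example (hypothetical tweet): after the regex chain above,
# 'RT @user: Great movie! https://t.co/xyz #FordVsFerrari' becomes 'great movie'.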
# In[5]:
data = dataCleaning(data)
# In[6]:
data.tail(10)
# ### Encode tweets so as to simplify the Emojis
# In[7]:
def encodeString(tweets):
return tweets.encode('ascii', 'ignore').decode('ascii')
# In[8]:
data = data.apply(emoji.demojize)
# In[9]:
data[25]
# In[10]:
data = data.replace(to_replace ='[_:]+', value = ' ', regex = True)
# In[11]:
data.iloc[25]
# ### Removing duplicate rows
# In[12]:
def removeDublicate(data):
print(data.shape[0])
dublicateRows=data.duplicated().tolist()
if len(dublicateRows) > 0:
print("Completly Dublicate rows",dublicateRows.count(True))
dublicateRows=data.iloc[:].duplicated().tolist()
if len(dublicateRows) > 0:
print("Dublicate Tweets",dublicateRows.count(True))
data=data.iloc[:].drop_duplicates()
return data;
# In[13]:
data = removeDublicate(data)
print(data.shape)
# In[14]:
# Remove words with 3 or fewer characters
data = data.apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# In[15]:
data.tail(20)
# ### Tokennization and POS tagging
# In[16]:
def convertToPosTag(tokens):
tagged_sent = nltk.pos_tag(tokens)
store_it = [(word, nltk.map_tag('en-ptb', 'universal', tag)) for word, tag in tagged_sent]
return store_it
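# Illustrative example: convertToPosTag(['good', 'movie']) should give
# [('good', 'ADJ'), ('movie', 'NOUN')] after mapping Penn Treebank tags to the universal tagset.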
# In[17]:
tt = TweetTokenizer()
tokenizedTweets = data.apply(tt.tokenize)
POStaggedLabel = tokenizedTweets.apply(convertToPosTag)
POStaggedLabel[0]
# In[18]:
POStaggedLabel[25]
# ### Removing STOP word and lemmatizing the tweets
# In[36]:
def ConvertToSimplerPosTag(tag):
if(tag=='NOUN'):
tag='n'
elif(tag=='VERB'):
tag='v'
elif(tag=='ADJ'):
tag='a'
elif(tag=='ADV'):
tag = 'r'
else:
tag='nothing'
return tag
# In[37]:
stop_words = stopwords.words('english')
pstem = PorterStemmer()
lem = WordNetLemmatizer()
# In[38]:
def removeStopWord(row):
filteredList = [(i,j) for i,j in row if i not in stop_words ]
return filteredList
# In[39]:
noStopWordList = POStaggedLabel.apply(removeStopWord)
# In[42]:
def lemmatize(row):
lemmatizeWord = [lem.lemmatize(w) for w,tag in row] #,pos= ConvertToSimplerPosTag(tag)
return [pstem.stem(i) for i in lemmatizeWord]
# In[43]:
lemmatizedDF = noStopWordList.apply(lemmatize)
# In[44]:
lemmatizedDF.head()
# # Ground Truth Labeling
# In[48]:
modelType = ["Text Blob","SentiWordNet","Afinn",'Combined']
negative = []
neutral = []
positive =[]
# ### Labeling the tweets with TextBlob
# In[49]:
def getLabels(row):
polarity = TextBlob(" ".join(row)).sentiment.polarity
return 1 if polarity > 0 else 0 if polarity == 0 else -1
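# Illustrative example: TextBlob("good movie").sentiment.polarity is positive (about 0.7),
# so the tweet gets label 1; exactly 0 maps to 0 and a negative polarity to -1.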
# In[50]:
SetimentLabel = tokenizedTweets.apply(getLabels)
# In[51]:
valueCountSentiment = SetimentLabel.value_counts()
# In[52]:
print(valueCountSentiment.sort_index())
count = list(valueCountSentiment.sort_index())
# In[53]:
print(count)
negative.append(count[0])
neutral.append(count[1])
positive.append(count[2])
# ### Labeling the tweets with sentiwordnet
# In[54]:
def ConvertToSimplerPosTag(tag):
if(tag=='NOUN'):
tag='n'
elif(tag=='VERB'):
tag='v'
elif(tag=='ADJ'):
tag='a'
elif(tag=='ADV'):
tag = 'r'
else:
tag='nothing'
return tag
# In[55]:
def getSentimentOfWorld(row):
positiveScore = []
negativeScore = []
for word ,tag in row:
try:
tag = ConvertToSimplerPosTag(tag)
if(tag!='nothing'):
concat = word+'.'+ tag+ '.01'
positiveScore.append(swm.senti_synset(concat).pos_score())
negativeScore.append(swm.senti_synset(concat).neg_score())
except Exception as e:
#print (e)
#print("An exception occurred")
pstem = PorterStemmer()
lem = WordNetLemmatizer()
word = lem.lemmatize(word)
word = pstem.stem(word)
concat = word+'.'+ tag+ '.01'
try:
positiveScore.append(swm.senti_synset(concat).pos_score())
negativeScore.append(swm.senti_synset(concat).neg_score())
except Exception as ex:
pass
#print("Nested error.")
#continue
postiveScoreTotal = np.sum(positiveScore)
negativeScoreTotal = np.sum(negativeScore)
if(postiveScoreTotal > negativeScoreTotal) :
return 1
elif (postiveScoreTotal < negativeScoreTotal) :
return -1
else:
return 0
# In[56]:
sentiDF = POStaggedLabel.apply(getSentimentOfWorld)
# In[57]:
count = list(sentiDF.value_counts().sort_index())
# In[58]:
print(count)
negative.append(count[0])
neutral.append(count[1])
positive.append(count[2])
# ### Labeling Tweets with AFINN
# In[59]:
def getSentimentAfinn(row):
af = Afinn()
polarity = af.score(" ".join(row))
return 1 if polarity > 0 else 0 if polarity == 0 else -1
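# Illustrative example: Afinn().score("good movie") is positive (AFINN scores "good" as +3),
# giving label 1; a total of 0 gives 0 and a negative total gives -1.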
# In[60]:
AfinnLabel = tokenizedTweets.apply(getSentimentAfinn)
# In[61]:
count=list(AfinnLabel.value_counts().sort_values())
print(count)
negative.append(count[0])
neutral.append(count[1])
positive.append(count[2])
# # Combining the results of all the sentiment analyzers above
# In[62]:
def assignLabel(row):
notAssigned = []
try:
return mode(row)
except Exception as ex:
return row[1]
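# Illustrative example: for labels (1, 1, -1) mode() returns 1; when there is no unique mode,
# e.g. (1, 0, -1), the except branch falls back to row[1], i.e. the TextBlob label.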
# In[63]:
combineLabel = pd.concat([SetimentLabel ,sentiDF, AfinnLabel ] , axis = 1,sort=False)
combineLabel.columns = [1,2,3]
# In[64]:
yLabel= combineLabel.apply(assignLabel,axis =1)
# In[65]:
count = list(yLabel.value_counts().sort_values())
negative.append(count[0])
neutral.append(count[1])
positive.append(count[2])
# In[66]:
print(len(yLabel))
print(len(lemmatizedDF))
# In[67]:
def autolabel(ax,rects, xpos='center'):
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0, 'right': 1, 'left': -1}
for rect in rects:
height = float("{0:.2f}".format(rect.get_height()))
height = int(height)
ax.annotate('{}'.format(height),xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(offset[xpos]*3, 3), # use 3 points offset
textcoords="offset points", # in both directions
ha=ha[xpos], va='bottom')
# In[96]:
def plotComparisionGraph(modelType,negative,neutral,positive,endValue):
print(len(negative))
ind = np.array([i for i in range(3,endValue,3)]) # the x locations for the groups
print(ind)
width = 0.65 # the width of the bars
fig, ax = plt.subplots(figsize = (6,5) )
rects1 = ax.bar(ind- width , negative, width,label='Negative') #yerr=men_std
rects2 = ax.bar(ind, neutral, width, label='Neutral') #yerr=women_std,
rects3 = ax.bar(ind+ width, positive, width, label='Positive') #yerr=women_std,
#rects4 = ax.bar(ind+ (1.5*width), f1ScoreList, width, label='F1-Score') #yerr=women_std,
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Count')
#ax.set_title('Count comparision between differnet Lexicon Model')
ax.set_xticks(ind)
ax.set_xticklabels(modelType)
ax.legend(loc='upper center', bbox_to_anchor=(0.90, 0.8), ncol=1) #shadow=True
autolabel(ax,rects1, "center")
autolabel(ax,rects2, "center")
autolabel(ax,rects3, "center")
#autolabel(ax,rects4, "center")
#fig.tight_layout()
plt.show()
# In[97]:
plotComparisionGraph(modelType,negative,neutral,positive,13)
# ### Visualize with the help of WorldCloud
# In[60]:
def plotWorldCould(Flattenlist,label):
plt.rcParams['figure.figsize']=(10.0,8.0)
plt.rcParams['font.size']=10
stopwords = set(STOPWORDS)
text = " ".join(tweet for tweet in [" ".join(i) for i in Flattenlist])
#print(text)
print ("There are {} words in the combination of all tweets.".format(len(text)))
wordcloud = WordCloud(
background_color='black',
stopwords=stopwords,
max_words=250,
max_font_size=50,
width=500,
height=300,
random_state=42
).generate(str(text))
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off')
plt.title(label)
plt.show()
#fig.savefig("word1.png", dpi=1400)
# In[61]:
# seperate the positive and negative data
#yLabel = SetimentLabel.to_numpy()
# In[62]:
def visualizedWordCloud(lemmatizedDF,yLabel):
# Ploting Tweets
pos = np.where(yLabel == 0)[0]
print(len(pos))
neutralTweets = lemmatizedDF.iloc[pos]
plotWorldCould(neutralTweets,"Neutral")
#Ploting Positive tweets
pos = np.where(yLabel == 1)[0]
print(len(pos))
print(len(lemmatizedDF))
positiveTweets = lemmatizedDF.iloc[pos]
plotWorldCould(positiveTweets,"Positive Word")
#Ploting negative
pos = np.where(yLabel == -1)[0]
print(len(pos))
negativeTweets = lemmatizedDF.iloc[pos]
plotWorldCould(negativeTweets,"Negative Word")
# In[63]:
visualizedWordCloud(lemmatizedDF,yLabel)
# # Removing Common words from the tweets
# In[64]:
def removeWords(row):
unwantedWord =['watch','film','movi','review']
row = [i for i in row if i not in unwantedWord]
return row
# In[65]:
lemmatizedDF = lemmatizedDF.apply(removeWords)
# In[66]:
#Re-visualized
visualizedWordCloud(lemmatizedDF,yLabel)
# # Saving preprocessed DataFrame to CSV
#lemmatizedDF
joinedTweet = lemmatizedDF.apply(lambda x: str(" ".join(x)))
data = pd.concat([joinedTweet,yLabel],axis = 1 )
data.columns = ['tweets','label']
data.to_csv('PrepeocessedFile.csv', index=False)
#3rd File
#!/usr/bin/env python
# coding: utf-8
# In[53]:
import math
import pandas as pd
import numpy as np
import time as time
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import operator
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.utils import resample
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
from nltk.tokenize import TweetTokenizer
from tqdm import tqdm_notebook as tqdm
# In[54]:
def readData(fileName):
data = pd.read_csv(fileName)
return data
# In[55]:
data = readData('PrepeocessedFile.csv')
# In[56]:
#data['tweets']= data['tweets'].apply(list)
data['label'].value_counts()
# In[57]:
xData = data.iloc[:,0]
yLabel = data.iloc[:,1]
# # Vectorized data
# In[6]:
vectorizedType = ['CV_1G','CV_2G','CV_3G','TV_1G','TV_2G','TV_3G']
accuracyList =[]
precisionList =[]
recallList =[]
f1ScoreList = []
# In[7]:
def plotCount(words,wordCount):
plt.figure(figsize=(8,6))
plt.bar(words[:10],wordCount[:10])
plt.xlabel('Words')
plt.ylabel('Frequency')
plt.title('Top words - Count Vectorizer')
plt.show()
# In[8]:
def testVectorizationNaiveBias(vectorisedData,yLabel):
xTrain, xTest, yTrain, yTest = train_test_split(vectorisedData, yLabel, test_size=0.25, random_state=27)
#initialize Model
NaiveModel = GaussianNB()
NaiveModel.fit(xTrain,yTrain)
predictedTrain = NaiveModel.predict(xTrain)
predictedTest = NaiveModel.predict(xTest)
accuracyTest = accuracy_score(predictedTest,list(yTest))
precisionTest = precision_score(predictedTest,list(yTest),average = 'macro')
recallTest = recall_score(predictedTest,list(yTest),average = 'macro')
f1Score = f1_score(predictedTest,list(yTest),average = 'macro')
print("Accuracy on Training",accuracy_score(predictedTrain,list(yTrain)))
print("Accuracy on Testing Set",accuracyTest)
print("Precision on Testing Set",precisionTest)
print("Recall on Testing Set",recallTest)
print("F1 score on Testing Set",f1Score)
return accuracyTest,precisionTest,recallTest,f1Score
# ### Vectorized with CountVector
# In[9]:
def countVectorize(xData,ngramRange):
cv=CountVectorizer(decode_error='ignore',lowercase=True,analyzer = 'word',ngram_range = ngramRange,max_features = 600 )
x_traincv=cv.fit_transform(xData)
x_trainCountVector = x_traincv.toarray()
columnsName = cv.get_feature_names()
ColwiseSum=x_trainCountVector.sum(axis=0)
wordCountPair = sorted(zip(columnsName,ColwiseSum),key=lambda pair: pair[1],reverse=True)
word = [x for x,y in wordCountPair]
counts = [y for x,y in wordCountPair]
plotCount(word,counts)
return x_trainCountVector
# In[10]:
ngramList = [(1,1),(1,2),(1,3)]
for ngramrange in ngramList:
vectorisedData = countVectorize(xData,ngramrange)
accuracyTest,precisionTest,recallTest,f1Score = testVectorizationNaiveBias(vectorisedData,yLabel)
accuracyList.append(accuracyTest)
precisionList.append(precisionTest)
recallList.append(recallTest)
f1ScoreList.append(f1Score)
# ### Vectorized with tfidfVectorized
# In[11]:
def tfidfVectorize(xData,ngramRange):
cv=TfidfVectorizer(decode_error='ignore',lowercase=True,analyzer = 'word',ngram_range = ngramRange,max_features = 600 )
x_traincv=cv.fit_transform(xData)
x_trainCountVector = x_traincv.toarray()
columnsName = cv.get_feature_names()
ColwiseSum=x_trainCountVector.sum(axis=0)
wordCountPair = sorted(zip(columnsName,ColwiseSum),key=lambda pair: pair[1],reverse=True)
word = [x for x,y in wordCountPair]
counts = [y for x,y in wordCountPair]
plotCount(word,counts)
return x_trainCountVector
# In[12]:
ngramList = [(1,1),(1,2),(1,3)]
for ngramrange in ngramList:
vectorisedData = tfidfVectorize(xData,ngramrange)
accuracyTest,precisionTest,recallTest,f1Score = testVectorizationNaiveBias(vectorisedData,yLabel)
accuracyList.append(accuracyTest)
precisionList.append(precisionTest)
recallList.append(recallTest)
f1ScoreList.append(f1Score)
# In[13]:
def autolabel(ax,rects, xpos='center'):
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0, 'right': 1, 'left': -1}
for rect in rects:
height = float("{0:.2f}".format(rect.get_height()))
ax.annotate('{}'.format(height),xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(offset[xpos]*3, 3), # use 3 points offset
textcoords="offset points", # in both directions
ha=ha[xpos], va='bottom')
# In[14]:
def plotComparisionGraph(vectorizedType,accuracyList,precisionList,recallList,f1ScoreList,endValue):
print(accuracyList)
ind = np.array([i for i in range(3,endValue,3)]) # the x locations for the groups
print(ind)
width = 0.55 # the width of the bars
fig, ax = plt.subplots(figsize = (8,6) )
rects1 = ax.bar(ind- (1.5*width) , accuracyList, width,label='Accuracy') #yerr=men_std
rects2 = ax.bar(ind- width/2, precisionList, width, label='Precision') #yerr=women_std,
rects3 = ax.bar(ind+ width/2, recallList, width, label='Recall') #yerr=women_std,
rects4 = ax.bar(ind+ (1.5*width), f1ScoreList, width, label='F1-Score') #yerr=women_std,
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Comparison between different metrics')
ax.set_xticks(ind)
ax.set_xticklabels(vectorizedType)
ax.legend(loc='upper center', bbox_to_anchor=(0.9, 0.5), ncol=1) #shadow=True
autolabel(ax,rects1, "center")
autolabel(ax,rects2, "center")
autolabel(ax,rects3, "center")
autolabel(ax,rects4, "center")
fig.tight_layout()
plt.show()
# In[15]:
plotComparisionGraph(vectorizedType,accuracyList,precisionList,recallList,f1ScoreList,19)
# ### DocToVec vectorization
# In[16]:
tt = TweetTokenizer()
tokenizedData = xData.apply(tt.tokenize)
# In[17]:
def extractVector(model,rows,col):
vector = np.zeros((rows,col))
for i in range(rows):
vector[i] = model.docvecs[i]
return vector
# In[18]:
def docToVec(vec_type,tokenizedData):
max_epochs = 10
vec_size = 200
alpha = 0.0025
#tagging the words to give tags
taggedData = [TaggedDocument(data, tags=[str(i)]) for i,data in enumerate(tokenizedData)]
#Using DoctoVec model
model = None
if vec_type == 'DBOW':
model = Doc2Vec(dm =0,vector_size=vec_size,alpha=alpha,negative = 5,min_alpha=0.00025,min_count=1,workers = 3)
elif vec_type == 'DMC':
# dm=1 with dm_concat=1 selects the concatenated PV-DM ("DMC") variant
model = Doc2Vec(dm =1,dm_concat=1,vector_size=vec_size,alpha=alpha,negative = 5
,min_alpha=0.00025,min_count=1,workers = 3)
else:
model = Doc2Vec(dm=1,dm_mean=1,vector_size=vec_size,alpha=alpha,negative = 5
,min_alpha=0.00025,min_count=1,workers = 3)
model.build_vocab(taggedData)
for epoch in tqdm(range(max_epochs)):
model.train(taggedData,total_examples=model.corpus_count,epochs=model.iter)
model.alpha -= 0.0002
model.min_alpha = model.alpha
#retreve Vectors
return extractVector(model,len(taggedData),vec_size)
# In[19]:
doc2VecType = ['DBOW','DMC','DMM']
daccuracyList =[]
dprecisionList =[]
drecallList =[]
df1ScoreList = []
for i in range(3):
vectorizedData = docToVec(doc2VecType[i],tokenizedData)
accuracy,precision,recall,f1 = testVectorizationNaiveBias(vectorizedData,yLabel)
daccuracyList.append(accuracy)
dprecisionList.append(precision)
drecallList.append(recall)
df1ScoreList.append(f1)
# In[20]:
plotComparisionGraph(doc2VecType,daccuracyList,dprecisionList,drecallList,df1ScoreList,10)
# ### Finally taking TF-IDF with 1-2 grams
# In[58]:
vectorisedData = tfidfVectorize(xData,(1,2))
vectorisedData = pd.DataFrame(vectorisedData)
# # Dealing with an unbalanced dataset
# ### Note: Plot the graph to show that the dataset is unbalanced
# In[59]:
X_train, X_test, y_train, y_test = train_test_split(vectorisedData, yLabel,
test_size=0.25,stratify=yLabel ,random_state=27)
# In[62]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# In[24]:
def HandleUnbalancedDataSet(X_train, y_train,samplesize):
X = pd.concat([X_train, y_train], axis=1)
import pandas as pd
from sklearn.decomposition import PCA
x_train = pd.read_csv('X_train.csv', index_col=0).drop(['member_id', 'id', 'pymnt_plan', 'policy_code', 'url'], axis=1)
# last_pymnt_d : 81 => 13 dim
# issue_d : 78 => 13 dim
# last_credit_pull_d : 81 => 13 dim
# earliest_cr_line : 758 => 13 dim
x_train['issue_d'] = x_train['issue_d'].apply(lambda x: str(x)[:3])
x_train['last_pymnt_d'] = x_train['last_pymnt_d'].apply(lambda x: str(x)[:3])
x_train['last_credit_pull_d'] = x_train['last_credit_pull_d'].apply(lambda x: str(x)[:3])
x_train['earliest_cr_line'] = x_train['earliest_cr_line'].apply(lambda x: str(x)[:3])
x_train_one_hot = pd.get_dummies(x_train[['grade','sub_grade','home_ownership','verification_status','purpose',
'addr_state','initial_list_status','emp_length','application_type',
'verification_status_joint','hardship_flag','hardship_type','hardship_reason',
'hardship_status','hardship_loan_status','debt_settlement_flag',
'settlement_status']], dummy_na=True)
x_train_one_hot = pd.concat([x_train_one_hot, pd.get_dummies(x_train[['issue_d','last_pymnt_d','last_credit_pull_d',
'earliest_cr_line']])], axis=1)
x_train_one_hot = x_train_one_hot.astype('float')
x_test = pd.read_csv('X_test.csv', index_col=0)
import numpy as np
import pytest
from pandas import (
DatetimeIndex,
IntervalIndex,
NaT,
Period,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDropna:
def test_dropna_empty(self):
ser = Series([], dtype=object)
assert len(ser.dropna()) == 0
return_value = ser.dropna(inplace=True)
assert return_value is None
assert len(ser) == 0
# invalid axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
ser.dropna(axis=1)
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
return_value = ts.dropna(inplace=True)
assert return_value is None
assert ts.name == name
def test_dropna_no_nan(self):
for ser in [
Series([1, 2, 3], name="x"),
Series([False, True, False], name="x"),
]:
result = ser.dropna()
tm.assert_series_equal(result, ser)
assert result is not ser
s2 = ser.copy()
return_value = s2.dropna(inplace=True)
assert return_value is None
tm.assert_series_equal(s2, ser)
def test_dropna_intervals(self):
ser = Series(
[np.nan, 1, 2, 3],
IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),
)
result = ser.dropna()
expected = ser.iloc[1:]
tm.assert_series_equal(result, expected)
def test_dropna_period_dtype(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), | Period("NaT", freq="M") | pandas.Period |
import sys
import os
import logging
import datetime
import pandas as pd
from job import Job, Trace
from policies import ShortestJobFirst, FirstInFirstOut, ShortestRemainingTimeFirst, QuasiShortestServiceFirst
sys.path.append('..')
def simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):
if policy == 'sjf':
scheduler = ShortestJobFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'fifo':
scheduler = FirstInFirstOut(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'srtf':
scheduler = ShortestRemainingTimeFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'qssf':
scheduler = QuasiShortestServiceFirst(
trace, vc, placement, log_dir, logger, start_ts, args[0])
scheduler.simulate()
logger.info(f'Finish {vc.vc_name}')
return True
def get_available_schedulers():
return ['fifo', 'sjf', 'srtf', 'qssf']
def get_available_placers():
return ['random', 'consolidate', 'consolidateFirst']
def trace_process(dir, date_range):
start = '2020-04-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['job_id', 'user', 'vc', 'jobname', 'gpu_num',
'cpu_num', 'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_philly_process(dir, date_range):
start = '2017-10-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['user', 'vc', 'jobname', 'gpu_num',
'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 24 09:54:59 2021
@author: Gary
This set of routines is used to assist in the curation of IngredientName.
"""
import numpy as np
import pandas as pd
import difflib as dl
import build_common
sources = build_common.get_transformed_dir()
# nonspdf = pd.read_csv('./sources/IngName_non-specific_list.csv',quotechar='$',
# encoding='utf-8')
# ctsyndf = pd.read_csv('./sources/CAS_synonyms_CompTox.csv',quotechar='$',
# encoding='utf-8')
# ctsyndf = ctsyndf[~ctsyndf.duplicated()]
# sfsyndf = pd.read_csv('./sources/CAS_synonyms.csv',quotechar='$',
# encoding='utf-8')
# ING_to_curate = pd.read_csv('./tmp/ING_to_curate.csv',quotechar='$',encoding='utf-8')
# ING_curated = pd.read_csv('./sources/ING_curated.csv',quotechar='$',encoding='utf-8')
# fullscan_df = pd.read_csv('./sources/ING_fullscan.csv',quotechar='$',encoding='utf-8')
# t = pd.merge(ING_to_curate,ING_curated[['IngredientName']],on='IngredientName',
# how='outer',indicator=True)
# ING_to_curate = t[t['_merge']=='left_only']
# print(f'Number of Names to curate: {len(ING_to_curate)}')
# create ref dictionary
# Text/syn : (curation_code, [(casnum, source), (casnum, source)])
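# Illustrative entry (hypothetical chemical): refdic['acetic acid'] ->
# ('CASsyn', [('64-19-7', 'scifinder'), ('64-19-7', 'comptox')])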
def add_to_ref(dic,txt,curcode,casnum,source):
if txt=='': # don't add empty strings
return dic
if txt in dic.keys():
if len(txt)<1:
print(txt)
# add to existing entry. NOTE THAT LAST INSTANCE FOR AN ENTRY IS
# THE GIVEN PRIMACY by writing over previous curcodes
prev = dic[txt]
new = prev[1]
new.append((casnum,source))
dic[txt] = (curcode,new)
else:
dic[txt] = (curcode,[(casnum,source)])
return dic
def summarize_refs(ref):
for item in ref:
lst = ref[item][1]
lbl = ref[item][0]
if len(lst)>1: # may need to adjust curation code
first = lst[0][0]
for l in lst[1:]:
if l[0]!=first:
lbl = 'non_spec'
ref[item] = (lbl, lst)
#print(f'adjusted {item} to non_specific')
break
return ref
def ref_stats(dic):
cntr =0
for item in dic:
if len(dic[item][1])>1:
cntr+=1
return cntr
def build_refdic():
refdic = {}
nonspdf = pd.read_csv(sources+'IngName_non-specific_list.csv',quotechar='$',
encoding='utf-8')
ctsyndf = pd.read_csv(sources+'CAS_synonyms_CompTox.csv',quotechar='$',
encoding='utf-8')
ctsyndf = ctsyndf[~ctsyndf.duplicated()]
sfsyndf = pd.read_csv(sources+'CAS_synonyms.csv',quotechar='$',
encoding='utf-8')
# build refdic, one ref set at a time
print('scifinder to refdic...')
for i,row in sfsyndf.iterrows():
refdic = add_to_ref(refdic, row.synonym, 'CASsyn', row.cas_number, 'scifinder')
print('comptox to refdic...')
for i,row in ctsyndf.iterrows():
refdic = add_to_ref(refdic, row.synonym, 'CASsyn', row.cas_number, 'comptox')
print('nonspec to refdic...')
for i,row in nonspdf.iterrows():
refdic = add_to_ref(refdic, row.non_specific_code, 'non_spec', np.NaN, row.source)
print(f'Len of refdic {len(refdic)}, num of multiple refs/item: {ref_stats(refdic)}')
return summarize_refs(refdic)
def add_curated_record(record,ING_curated):
t = pd.concat([record,ING_curated],sort=True)
return t
def save_curated_df(ING_curated):
t = ING_curated[['IngredientName','recog_syn','prospect_CAS_fromIng','syn_code','match_ratio',
'alt_CAS','first_date','change_date','change_comment',
]]
t.to_csv('./tmp/ING_curated_NEW.csv',index=False,quotechar="$",encoding='utf-8')
return t
def make_record(IngN='unk',recog_syn='unk',prospect_CAS='unk',syn_code='unk',match_ratio=0,
alt1_CAS='unk',alt1_syn='unk'):
return pd.DataFrame({'IngredientName':[IngN],
'is_new':True,
'recog_syn':[recog_syn],
'prospect_CAS_fromIng':[prospect_CAS],
'syn_code':[syn_code],
'match_ratio':[match_ratio],
'alt_CAS':[alt1_CAS]})
def add_fullscan_record(record,fs_df):
t = pd.concat([record,fs_df],sort=True)
t = t[['IngredientName','recog_syn','prospect_CAS','syn_code','match_ratio']]
t.to_csv('./tmp/ING_fullscan_NEW.csv',index=False,quotechar="$",encoding='utf-8')
return t
def make_fullscan_record(IngN='unk',recog_syn='unk',prospect_CAS='unk'
,syn_code='unk',match_ratio=0):
return pd.DataFrame({'IngredientName':[IngN],
'recog_syn':[recog_syn],
'prospect_CAS':[prospect_CAS],
'syn_code':[syn_code],
'match_ratio':[match_ratio]},)
def full_scan(to_curate,refdic,fs_df):
# used to scan and classify the entire list of IngNames. This will take
# a significant amount of time for each entry.
ingwords = to_curate[to_curate.IngredientName.notna()].IngredientName.tolist()
ref = list(refdic.keys())
for cntr,i in enumerate(ingwords):
print(f'\n\n< {i} >')
d = dl.get_close_matches(i, ref,cutoff=0.85,n=3) #
if len(d)==0:
print(f'No matches ({cntr})')
rec = make_fullscan_record(i,'no match','non_spec','non_spec',0)
fs_df = add_fullscan_record(rec, fs_df)
if len(d)>0:
for cnt,match in enumerate(d):
print(f'match {cnt} ({cntr})')
print(refdic[match])
mat = refdic[d[cnt]]
ratio = dl.SequenceMatcher(a=i,b=match).ratio()
if mat[0] == 'non_spec':
rec = make_fullscan_record(i,d[cnt],'non_spec','non_spec',ratio)
else:
rec = make_fullscan_record(i,d[cnt],mat[1][0][0],mat[0],ratio)
fs_df = add_fullscan_record(rec, fs_df)
return fs_df
def analyze_fullscan(fs_df,ING_curated): #,useNEW=True):
# if useNEW:
# fs_df = pd.read_csv('./tmp/ING_fullscan_NEW.csv',quotechar='$',encoding='utf-8')
gb1 = fs_df.groupby('IngredientName',as_index=False)['match_ratio'].max()
gb1.columns = ['IngredientName','max_ratio']
fs_df = pd.merge(fs_df,gb1,on='IngredientName',how='left')
no_match = gb1[gb1.max_ratio<0.95].IngredientName.unique().tolist()
perfect = gb1[gb1.max_ratio==1].IngredientName.unique().tolist()
# ---- store no_matches:
print('parsing Names with no matches')
for ing in no_match:
record = make_record(IngN=ing,prospect_CAS='non_spec',syn_code='no_match')
ING_curated = add_curated_record(record, ING_curated)
# ---- store perfect matches:
print('parsing Names with perfect matches')
gb = fs_df[fs_df.match_ratio==1].groupby('IngredientName',as_index=False)['prospect_CAS'].first()
gb.columns = ['IngredientName','topCAS']
fs2 = pd.merge(fs_df,gb,on='IngredientName',how='left')
c1 = fs2.prospect_CAS!=fs2.topCAS
c2 = fs2.match_ratio>=0.95
gb2 = fs2[c1&c2].groupby('IngredientName')['prospect_CAS'].apply(set).apply(list).reset_index()
gb2.columns = ['IngredientName','alt1_CAS']
fs2 = pd.merge(fs2,gb2,on='IngredientName',how='left')
for i,row in fs2[fs2.match_ratio==1].iterrows():
record = make_record(IngN=row.IngredientName,
prospect_CAS=row.prospect_CAS,
recog_syn = row.recog_syn,
syn_code='perfect',
match_ratio=row.match_ratio,
alt1_CAS=row.alt1_CAS)
ING_curated = add_curated_record(record, ING_curated)
# ---- find and store matches that are less than perfect
print('parsing close and conflicting matches')
c1 = fs_df.IngredientName.isin(no_match)
c2 = fs_df.IngredientName.isin(perfect)
fs3 = fs_df[(~c1)&(~c2)].copy() # the rest of the list
gb = fs3.groupby(['IngredientName','prospect_CAS'],as_index=False)['match_ratio'].max()
gb1 = gb.groupby('IngredientName',as_index=False)['match_ratio'].max()
gb1.columns = ['IngredientName','max_ratio']
mg = pd.merge(gb,gb1,on='IngredientName',how='left')
mg['ratdiff'] = mg.max_ratio-mg.match_ratio
#mg = mg[mg.match_ratio!=mg.max_ratio]
mg = mg[mg.ratdiff<0.05]
gbwithin = mg.groupby('IngredientName',as_index=False)['prospect_CAS'].count()
gbwithin.columns = ['IngredientName','numprospect']
#mg = pd.merge(mg,gbwithin,on='IngredientName',how='left')
gbalt = mg.groupby('IngredientName')['prospect_CAS'].apply(list).reset_index()
gbalt.columns = ['IngredientName','alt1_CAS']
t = fs3.sort_values('match_ratio', ascending=False).drop_duplicates('IngredientName')
t = pd.merge(t,gbalt,on='IngredientName',how='left')
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for math functions
import math
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
#discord needs import requests
import requests
#redis
import redis
# Added for WebSocket Support
import pandas as pd
import pandas_ta as ta
import websocket, pprint
import ccxt
import logging
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# used to display holding coins in an ascii table
from prettytable import PrettyTable
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_discord_creds
)
# my helper utils
from helpers.os_utils import(rchop)
from threading import Thread, Event
# logging needed otherwise slient fails
logger = logging.getLogger('websocket')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def InitializeDataFeed():
#######################################
# (a) Create redis database
# (b) Define watch list and create a row for every coin
# (c) Open Web socket to start collecting market data into the redis database
# TO DO: Review: Looking at 3 things - SOCKET_LIST - bookTicker and aggTrade are pretty busy
# ######################################
SOCKET_URL= "wss://stream.binance.com:9443/ws/"
SOCKET_LIST = ["coin@bookTicker","coin@kline_1m","coin@aggTrade"]
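# Illustrative example: for a ticker BTC with PAIR_WITH = 'USDT', the streams subscribed below
# become btcusdt@bookTicker, btcusdt@kline_1m and btcusdt@aggTrade.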
current_ticker_list = []
#-------------------------------------------------------------------------------
# (a) Create redis database (MarketData) with a hash and list collection
# Hash Keys: L1:{Coin} -> which has Level 1 market data fields, see MarketDataRec
# list Keys: L1 -> L1:{Coin}
# Why: I added the list sorting of the hash, otherwise I could not sort
# (b) Define watch list of coins we want to monitor, see SOCKET_LIST + current_ticker_list
CoinsCounter = 0
tickers = [line.strip() for line in open(TICKERS_LIST)]
print( str(datetime.now()) + " :Preparing watch list defined in tickers file...")
for item in tickers:
#Create Dataframes with coins
coin = item + PAIR_WITH
data = {'symbol': coin}
MarketDataRec = {'symbol': coin , 'open': CoinsCounter, 'high': -1, 'low': -1, 'close': -1, 'potential' : -1, 'interval' : -1,'LastPx' : -1,'LastQty': -1,'BBPx': -1,'BBQty': -1,'BAPx': -1,'BAQty': -1,'updated' : -1}
MarketData.hmset("L1:"+coin, MarketDataRec)
MarketData.lpush("L1", "L1:"+coin)
#get_data_frame(coin) #Needs to move out
coinlist= [sub.replace('coin', coin.lower()) for sub in SOCKET_LIST]
current_ticker_list.extend(coinlist)
CoinsCounter += 1
print(f'{str(datetime.now())}: Total Coins: {CoinsCounter}')
if DEBUG:
start = datetime.now()
#Example sort and iterate thru the hash collection
GetCoinsInOrder = MarketData.sort('L1',alpha=True,desc=False,by='*->open')
for key in GetCoinsInOrder:
data = MarketData.hgetall(key)
print(data)
print('---scan--')
end = datetime.now()
print(str('queried in ' + str(end - start) + ' with sort.'))
print (current_ticker_list)
#-------------------------------------------------------------------------------
# (c) Create a Web Socket to get the market data
SOCKET = SOCKET_URL + '/'.join(current_ticker_list)
print( str(datetime.now()) + " :Connecting to WebSocket ...")
if DEBUG: print( str(datetime.now()) + " :Connecting to WebSocket " + SOCKET + " ...")
web_socket_app = websocket.WebSocketApp(SOCKET, header=['User-Agent: Python'],
on_message=on_message,
on_error=on_error,
on_close=on_close,
on_open=on_open)
web_socket_app.run_forever()
web_socket_app.close()
#-------------------------------------------------------------------------------
def is_nan(x):
return (x == -1 )
def get_data_frame(symbol):
global MarketPriceFrames
exchange = ccxt.binance()
timeframes = ['5m','15m','4h', '1d']
for item in timeframes:
macd = exchange.fetch_ohlcv(symbol, timeframe=item, limit=36)
df1 = pd.DataFrame(macd, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
import boto3, argparse, subprocess, sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
def pip_install(package):
subprocess.call([sys.executable, "-m", "pip", "install", package])
pip_install('sagemaker')
import sagemaker
from sagemaker.feature_store.feature_group import FeatureGroup
if __name__=='__main__':
parser = argparse.ArgumentParser()
# preprocessing arguments
parser.add_argument('--region', type=str)
parser.add_argument('--bucket', type=str)
args, _ = parser.parse_known_args()
print('Received arguments {}'.format(args))
region = args.region
bucket = args.bucket
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client(service_name='sagemaker')
featurestore_client = boto_session.client(service_name='sagemaker-featurestore-runtime')
session = sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_featurestore_runtime_client=featurestore_client)
# Read feature group name
with open('/opt/ml/processing/input/feature_group_name.txt') as f:
feature_group_name = f.read()
feature_group = FeatureGroup(name=feature_group_name, sagemaker_session=session)
feature_group_query = feature_group.athena_query()
feature_group_table = feature_group_query.table_name
print(feature_group_table)
query_string = 'SELECT label,review_body FROM "' \
+ feature_group_table+'"' \
+ ' INNER JOIN (SELECT product_id FROM (SELECT product_id, avg(star_rating) as avg_rating, count(*) as review_count \
FROM "' + feature_group_table+'"' \
+ ' GROUP BY product_id) WHERE review_count > 1000) tmp ON "' \
+ feature_group_table+'"'+ '.product_id=tmp.product_id;'
print(query_string)
dataset = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import numpy as np
import pandas as pd
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[2]:
BEA_API_KEY = dotenv_values(".env").get("BEA_API_KEY")
# In[3]:
# Table 1.1.2. Contributions to Percent Change in Real Gross Domestic Product (A) (Q)
response_T10102 = requests.get(f'https://apps.bea.gov/api/data/?&UserID={BEA_API_KEY}&'
'method=GetData&DataSetName=NIPA&TableName=T10102&Frequency=Q&Year=ALL&ResultFormat=JSON')
# In[4]:
df = pd.DataFrame(eval(response_T10102.content.decode('utf-8'))['BEAAPI']['Results']['Data'])
# In[5]:
df = df[['LineNumber', 'LineDescription', 'TimePeriod', 'DataValue']]
# In[6]:
#Hago el reemplazo de las fechas
dict_quarters = {'Q1':'-01-01', 'Q2':'-04-01', 'Q3':'-07-01', 'Q4':'-10-01'}
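# Illustrative example: a BEA TimePeriod such as '2020Q3' becomes '2020-07-01' after this replacement.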
df['TimePeriod'] = df['TimePeriod'].replace(dict_quarters, regex=True)
df['TimePeriod'] = pd.to_datetime(df['TimePeriod'], format='%Y-%m-%d')
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 11:04:59 2020
@author: <NAME>
"""
import pandas as pd
import janitor
import datetime
import pickle
from pathlib import Path
#from builder import *
from fars_cleaner.builder import get_renaming
import fars_cleaner.extra_info as ei
from fars_cleaner.fars_utils import createPerID
from fars_cleaner import FARSFetcher
class FARSProcessor:
def __init__(self,
start_year=1975,
end_year=2018,
fetcher: FARSFetcher = None,
):
"""
Parameters
----------
start_year : int, optional
Year to start analysis. The default is 1975.
end_year : int, optional
Year to end analysis. The default is 2018.
first_run : bool, optional
Flag to determine whether to process and write-out the required files, or whether the data can be loaded from
disk pre-processed. The default is True.
use_dask : bool, optional
Flag to determine whether to parallelize using Dask. Not implemented at this time.
The default is False.
client : Dask Distributed Client, optional. Required if use_dask is True. Not implemented at this time.
Returns
-------
"""
self.NOW = datetime.datetime.now()
self.start_year = start_year
self.end_year = end_year
if fetcher is None:
self.fetcher = FARSFetcher()
else:
self.fetcher = fetcher
fetcher.fetch_subset(self.start_year, self.end_year)
self.data_dir = self.fetcher.get_data_path()
with open(fetcher.fetch_mappers(), 'rb') as f:
self.mappers = pickle.load(f)
self.load_paths = self.fetcher.fetch_subset(self.start_year, self.end_year)
people = []
vehicles = []
accidents = []
for year in range(start_year, end_year + 1):
vehicle = self.load_vehicles(year)
person = self.load_people(year)
accident = self.load_accidents(year)
accident['YEAR'] = year
vehicle['YEAR'] = year
person['YEAR'] = year
people.append(person)
vehicles.append(vehicle)
accidents.append(accident)
#self.accidents = pd.concat(accidents)
print("accidents")
self.accidents = self.process_accidents(pd.concat(accidents))
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections import namedtuple, defaultdict, OrderedDict
from time import sleep
from math import fabs
import datetime
import pytz
from six import iteritems, itervalues
import polling
import pandas as pd
import numpy as np
from zipline.gens.brokers.broker import Broker
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.protocol import MutableView
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
from ib.ext.EClientSocket import EClientSocket
from ib.ext.EWrapper import EWrapper
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.ExecutionFilter import ExecutionFilter
from ib.ext.EClientErrors import EClientErrors
from logbook import Logger
if sys.version_info > (3,):
long = int
log = Logger('IB Broker')
Position = namedtuple('Position', ['contract', 'position', 'market_price',
'market_value', 'average_cost',
'unrealized_pnl', 'realized_pnl',
'account_name'])
_max_wait_subscribe = 10 # how many cycles to wait
_connection_timeout = 15 # Seconds
_poll_frequency = 0.1
symbol_to_exchange = defaultdict(lambda: 'SMART')
symbol_to_exchange['VIX'] = 'CBOE'
symbol_to_exchange['SPX'] = 'CBOE'
symbol_to_exchange['VIX3M'] = 'CBOE'
symbol_to_exchange['VXST'] = 'CBOE'
symbol_to_exchange['VXMT'] = 'CBOE'
symbol_to_exchange['GVZ'] = 'CBOE'
symbol_to_exchange['GLD'] = 'ARCA'
symbol_to_exchange['GDX'] = 'ARCA'
symbol_to_exchange['GPRO'] = 'SMART/NASDAQ'
symbol_to_exchange['MSFT'] = 'SMART/NASDAQ'
symbol_to_exchange['CSCO'] = 'SMART/NASDAQ'
symbol_to_sec_type = defaultdict(lambda: 'STK')
symbol_to_sec_type['VIX'] = 'IND'
symbol_to_sec_type['VIX3M'] = 'IND'
symbol_to_sec_type['VXST'] = 'IND'
symbol_to_sec_type['VXMT'] = 'IND'
symbol_to_sec_type['GVZ'] = 'IND'
symbol_to_sec_type['SPX'] = 'IND'
def log_message(message, mapping):
try:
del (mapping['self'])
except (KeyError,):
pass
items = list(mapping.items())
items.sort()
log.debug(('### %s' % (message,)))
for k, v in items:
log.debug((' %s:%s' % (k, v)))
def _method_params_to_dict(args):
return {k: v
for k, v in iteritems(args)
if k != 'self'}
class TWSConnection(EClientSocket, EWrapper):
def __init__(self, tws_uri):
"""
:param tws_uri: host:listening_port:client_id
- host ip of running tws or ibgw
- port, default for tws 7496 and for ibgw 4002
- your client id, could be any number as long as it's not already used
"""
EWrapper.__init__(self)
EClientSocket.__init__(self, anyWrapper=self)
self.tws_uri = tws_uri
host, port, client_id = self.tws_uri.split(':')
self._host = host
self._port = int(port)
self.client_id = int(client_id)
self._next_ticker_id = 0
self._next_request_id = 0
self._next_order_id = None
self.managed_accounts = None
self.symbol_to_ticker_id = {}
self.ticker_id_to_symbol = {}
self.last_tick = defaultdict(dict)
self.bars = {}
# accounts structure: accounts[account_id][currency][value]
self.accounts = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: np.NaN)))
self.accounts_download_complete = False
self.positions = {}
self.portfolio = {}
self.open_orders = {}
self.order_statuses = {}
self.executions = defaultdict(OrderedDict)
self.commissions = defaultdict(OrderedDict)
self._execution_to_order_id = {}
self.time_skew = None
self.unrecoverable_error = False
self.connect()
def connect(self):
log.info("Connecting: {}:{}:{}".format(self._host, self._port,
self.client_id))
self.eConnect(self._host, self._port, self.client_id)
timeout = _connection_timeout
while timeout and not self.isConnected():
sleep(_poll_frequency)
timeout -= _poll_frequency
else:
if not self.isConnected():
raise SystemError("Connection timeout during TWS connection!")
self._download_account_details()
log.info("Managed accounts: {}".format(self.managed_accounts))
self.reqCurrentTime()
self.reqIds(1)
while self.time_skew is None or self._next_order_id is None:
sleep(_poll_frequency)
log.info("Local-Broker Time Skew: {}".format(self.time_skew))
def disconnect(self):
self.eDisconnect()
def _download_account_details(self):
exec_filter = ExecutionFilter()
exec_filter.m_clientId = self.client_id
self.reqExecutions(self.next_request_id, exec_filter)
self.reqManagedAccts()
while self.managed_accounts is None:
sleep(_poll_frequency)
for account in self.managed_accounts:
self.reqAccountUpdates(subscribe=True, acctCode=account)
while self.accounts_download_complete is False:
sleep(_poll_frequency)
@property
def next_ticker_id(self):
ticker_id = self._next_ticker_id
self._next_ticker_id += 1
return ticker_id
@property
def next_request_id(self):
request_id = self._next_request_id
self._next_request_id += 1
return request_id
@property
def next_order_id(self):
order_id = self._next_order_id
self._next_order_id += 1
return order_id
def subscribe_to_market_data(self,
symbol,
sec_type='STK',
exchange='SMART',
currency='USD'):
if symbol in self.symbol_to_ticker_id:
# Already subscribed to market data
return
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = symbol_to_sec_type[symbol]
contract.m_exchange = symbol_to_exchange[symbol]
contract.m_currency = currency
ticker_id = self.next_ticker_id
self.symbol_to_ticker_id[symbol] = ticker_id
self.ticker_id_to_symbol[ticker_id] = symbol
# INDEX tickers cannot be requested with market data. The data can,
# however, be requested with realtimeBars. This change will make
# sure we can request data from INDEX tickers like SPX, VIX, etc.
if contract.m_secType == 'IND':
self.reqRealTimeBars(ticker_id, contract, 60, 'TRADES', False)
else:
tick_list = "233" # RTVolume, return tick_type == 48
self.reqHistoricalData(ticker_id, contract, '', '60 S', '1 secs', 'TRADES', False, 2)
self.reqMktData(ticker_id, contract, tick_list, False)
def unsubscribe_from_market_data(self):
for symbol, ticker_id in self.symbol_to_ticker_id.copy().items():
if symbol_to_sec_type[symbol] == 'IND':
self.cancelRealTimeBars(ticker_id)
else:
self.cancelMktData(ticker_id)
self.symbol_to_ticker_id.pop(symbol, None)
def _process_tick(self, ticker_id, tick_type, value):
try:
symbol = self.ticker_id_to_symbol[ticker_id]
except KeyError:
log.error("Tick {} for id={} is not registered".format(tick_type,
ticker_id))
return
if tick_type == 48:
# RT Volume Bar. Format:
# Last trade price; Last trade size;Last trade time;Total volume;\
# VWAP;Single trade flag
# e.g.: 701.28;1;1348075471534;67854;701.46918464;true
(last_trade_price, last_trade_size, last_trade_time, total_volume,
vwap, single_trade_flag) = value.split(';')
# Ignore this update if last_trade_price is empty:
# tickString: tickerId=0 tickType=48/RTVolume ;0;1469805548873;\
# 240304;216.648653;true
if len(last_trade_price) == 0:
return
last_trade_dt = pd.to_datetime(float(last_trade_time), unit='ms',
utc=True)
self._add_bar(symbol, float(last_trade_price),
int(last_trade_size), last_trade_dt,
int(total_volume), float(vwap),
single_trade_flag)
def _add_bar(self, symbol, last_trade_price, last_trade_size,
last_trade_time, total_volume, vwap, single_trade_flag):
bar = pd.DataFrame(index=pd.DatetimeIndex([last_trade_time]),
data={'last_trade_price': last_trade_price,
'last_trade_size': last_trade_size,
'total_volume': total_volume,
'vwap': vwap,
'single_trade_flag': single_trade_flag})
if symbol not in self.bars:
self.bars[symbol] = bar
else:
self.bars[symbol] = self.bars[symbol].append(bar)
def tickPrice(self, ticker_id, field, price, can_auto_execute):
self._process_tick(ticker_id, tick_type=field, value=price)
def tickSize(self, ticker_id, field, size):
self._process_tick(ticker_id, tick_type=field, value=size)
def tickOptionComputation(self,
ticker_id, field, implied_vol, delta, opt_price,
pv_dividend, gamma, vega, theta, und_price):
log_message('tickOptionComputation', vars())
def tickGeneric(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickString(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickEFP(self, ticker_id, tick_type, basis_points,
formatted_basis_points, implied_future, hold_days,
future_expiry, dividend_impact, dividends_to_expiry):
log_message('tickEFP', vars())
def updateAccountValue(self, key, value, currency, account_name):
self.accounts[account_name][currency][key] = value
def updatePortfolio(self,
contract,
position,
market_price,
market_value,
average_cost,
unrealized_pnl,
realized_pnl,
account_name):
symbol = contract.m_symbol
position = Position(contract=contract,
position=position,
market_price=market_price,
market_value=market_value,
average_cost=average_cost,
unrealized_pnl=unrealized_pnl,
realized_pnl=realized_pnl,
account_name=account_name)
self.positions[symbol] = position
def updateAccountTime(self, time_stamp):
pass
def accountDownloadEnd(self, account_name):
self.accounts_download_complete = True
def nextValidId(self, order_id):
self._next_order_id = order_id
def contractDetails(self, req_id, contract_details):
log_message('contractDetails', vars())
def contractDetailsEnd(self, req_id):
log_message('contractDetailsEnd', vars())
def bondContractDetails(self, req_id, contract_details):
log_message('bondContractDetails', vars())
def orderStatus(self, order_id, status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id, why_held):
self.order_statuses[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"filled={filled} remaining={remaining} "
"avg_fill_price={avg_fill_price} "
"last_fill_price={last_fill_price} ".format(
order_id=order_id,
status=self.order_statuses[order_id]['status'],
filled=self.order_statuses[order_id]['filled'],
remaining=self.order_statuses[order_id]['remaining'],
avg_fill_price=self.order_statuses[order_id]['avg_fill_price'],
last_fill_price=self.order_statuses[order_id]['last_fill_price']))
def openOrder(self, order_id, contract, order, state):
self.open_orders[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"{order_action} {order_count} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price}".format(
order_id=order_id,
status=state.m_status,
order_action=order.m_action,
order_count=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice))
def openOrderEnd(self):
pass
def execDetails(self, req_id, contract, exec_detail):
order_id, exec_id = exec_detail.m_orderId, exec_detail.m_execId
self.executions[order_id][exec_id] = _method_params_to_dict(vars())
self._execution_to_order_id[exec_id] = order_id
log.info(
"Order-{order_id} executed @ {exec_time}: "
"{symbol} current: {shares} @ ${price} "
"total: {cum_qty} @ ${avg_price} "
"exec_id: {exec_id} by client-{client_id}".format(
order_id=order_id, exec_id=exec_id,
exec_time=pd.to_datetime(exec_detail.m_time),
symbol=contract.m_symbol,
shares=exec_detail.m_shares,
price=exec_detail.m_price,
cum_qty=exec_detail.m_cumQty,
avg_price=exec_detail.m_avgPrice,
client_id=exec_detail.m_clientId))
def execDetailsEnd(self, req_id):
log.debug(
"Execution details completed for request {req_id}".format(
req_id=req_id))
def commissionReport(self, commission_report):
exec_id = commission_report.m_execId
if exec_id in self._execution_to_order_id:
order_id = self._execution_to_order_id[exec_id]
self.commissions[order_id][exec_id] = commission_report
log.debug(
"Order-{order_id} report: "
"realized_pnl: ${realized_pnl} "
"commission: ${commission} yield: {yield_} "
"exec_id: {exec_id}".format(
order_id=order_id,
exec_id=exec_id,
realized_pnl=commission_report.m_realizedPNL
if commission_report.m_realizedPNL != sys.float_info.max
else 0,
commission=commission_report.m_commission,
yield_=commission_report.m_yield
if commission_report.m_yield != sys.float_info.max
else 0)
)
def connectionClosed(self):
self.unrecoverable_error = True
log.error("IB Connection closed")
def error(self, id_=None, error_code=None, error_msg=None):
if isinstance(id_, Exception):
# XXX: for an unknown reason 'log' is None in this branch,
# therefore it needs to be instantiated before use
global log
if not log:
log = Logger('IB Broker')
log.exception(id_)
if isinstance(error_code, EClientErrors.CodeMsgPair):
error_msg = error_code.msg()
error_code = error_code.code()
if isinstance(error_code, int):
if error_code in (502, 503, 326):
# 502: Couldn't connect to TWS.
# 503: The TWS is out of date and must be upgraded.
                # 326: Unable to connect as the client id is already in use.
self.unrecoverable_error = True
if error_code < 1000:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.info("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
def updateMktDepth(self, ticker_id, position, operation, side, price,
size):
log_message('updateMktDepth', vars())
def updateMktDepthL2(self, ticker_id, position, market_maker, operation,
side, price, size):
log_message('updateMktDepthL2', vars())
def updateNewsBulletin(self, msg_id, msg_type, message, orig_exchange):
log_message('updateNewsBulletin', vars())
def managedAccounts(self, accounts_list):
self.managed_accounts = accounts_list.split(',')
def receiveFA(self, fa_data_type, xml):
log_message('receiveFA', vars())
def historicalData(self, req_id, date, open_, high, low, close, volume,
count, wap, has_gaps):
if close != -1:
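            # Build an RTVolume-style string for generic tick type 48 (in the IB
            # API: "last price;last size;time in ms;total volume;vwap;single-trade
            # flag"); here close/count/date/volume/wap stand in for those fields.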
value = (";".join([str(close), str(count), str(int(date) * 1000), str(volume),
str(wap), "false"]))
self._process_tick(req_id, tick_type=48, value=value)
def scannerParameters(self, xml):
log_message('scannerParameters', vars())
def scannerData(self, req_id, rank, contract_details, distance, benchmark,
projection, legs_str):
log_message('scannerData', vars())
def currentTime(self, time):
self.time_skew = (pd.to_datetime('now', utc=True) -
                          pd.to_datetime(int(time), unit='s', utc=True))
def deltaNeutralValidation(self, req_id, under_comp):
log_message('deltaNeutralValidation', vars())
def fundamentalData(self, req_id, data):
log_message('fundamentalData', vars())
def marketDataType(self, req_id, market_data_type):
log_message('marketDataType', vars())
def realtimeBar(self, req_id, time, open_, high, low, close, volume, wap,
count):
value = (";".join([str(close), str(count), str(time), str(volume),
str(wap), "true"]))
self._process_tick(req_id, tick_type=48, value=value)
def scannerDataEnd(self, req_id):
log_message('scannerDataEnd', vars())
def tickSnapshotEnd(self, req_id):
log_message('tickSnapshotEnd', vars())
def position(self, account, contract, pos, avg_cost):
log_message('position', vars())
def positionEnd(self):
log_message('positionEnd', vars())
def accountSummary(self, req_id, account, tag, value, currency):
log_message('accountSummary', vars())
def accountSummaryEnd(self, req_id):
log_message('accountSummaryEnd', vars())
class IBBroker(Broker):
def __init__(self, tws_uri, account_id=None):
"""
:param tws_uri: host:listening_port:client_id
        - host: IP of the running TWS or IB Gateway
        - port: defaults are 7496 for TWS and 4002 for IB Gateway
        - client id: any number not already in use by another client
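        Example (illustrative values): "127.0.0.1:7496:123" connects to a
        local TWS listening on its default port with client id 123.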
"""
self._tws_uri = tws_uri
self._orders = {}
self._transactions = {}
self._tws = TWSConnection(tws_uri)
self.account_id = (self._tws.managed_accounts[0] if account_id is None
else account_id)
self.currency = 'USD'
self.timezone = 'US/Eastern'
self._subscribed_assets = []
        super(IBBroker, self).__init__()
def init(self):
if not self._tws.isConnected():
self._tws.connect()
@property
def subscribed_assets(self):
return self._subscribed_assets
def subscribe_to_market_data(self, asset):
if asset not in self.subscribed_assets:
log.info("Subscribing to market data for {}".format(
asset))
            # the str() cast is required: the underlying IB API expects a plain str symbol
self._tws.subscribe_to_market_data(str(asset.symbol))
self._subscribed_assets.append(asset)
try:
polling.poll(
lambda: asset.symbol in self._tws.bars,
timeout=_max_wait_subscribe,
step=_poll_frequency)
            except polling.TimeoutException:
                log.warning('Timed out while subscribing to market data for %s'
                            % str(asset.symbol))
else:
log.debug("Subscription completed")
@property
def positions(self):
self._get_positions_from_broker()
return self.metrics_tracker.positions
def _get_positions_from_broker(self):
"""
        Get the positions from the broker and update the zipline objects (the
        ledger). Should be called once at startup and whenever we want to
        refresh the positions.
"""
cur_pos_in_tracker = self.metrics_tracker.positions
for symbol in self._tws.positions:
ib_position = self._tws.positions[symbol]
try:
z_position = zp.Position(zp.InnerPosition(symbol_lookup(symbol)))
editable_position = MutableView(z_position)
except SymbolNotFound:
# The symbol might not have been ingested to the db therefore
# it needs to be skipped.
                log.warning('Skipping position in %s: symbol not found in the bundle '
                            '(probably not ingested)' % symbol)
continue
editable_position._underlying_position.amount = int(ib_position.position)
editable_position._underlying_position.cost_basis = float(ib_position.average_cost)
# Check if symbol exists in bars df
if symbol in self._tws.bars:
editable_position._underlying_position.last_sale_price = \
float(self._tws.bars[symbol].last_trade_price.iloc[-1])
editable_position._underlying_position.last_sale_date = \
self._tws.bars[symbol].index.values[-1]
else:
                # last_sale_price cannot be set to None (it must be numeric), so leave it unchanged
editable_position._underlying_position.last_sale_date = None
self.metrics_tracker.update_position(z_position.asset,
amount=z_position.amount,
last_sale_price=z_position.last_sale_price,
last_sale_date=z_position.last_sale_date,
cost_basis=z_position.cost_basis)
for asset in cur_pos_in_tracker:
if asset.symbol not in self._tws.positions:
                # remove the position from the metrics_tracker as it's no longer in the broker portfolio
self.metrics_tracker.update_position(asset,
amount=0)
        # The metrics tracker exposes both self.positions and self.portfolio.positions;
        # keep these two objects consistent:
# (self.portfolio.positions is self.metrics_tracker._ledger._portfolio.positions)
# (self.metrics_tracker.positions is self.metrics_tracker._ledger.position_tracker.positions)
self.metrics_tracker._ledger._portfolio.positions = self.metrics_tracker.positions
@property
def portfolio(self):
        # accessing self.positions refreshes broker positions in the metrics tracker
        positions = self.positions
return self.metrics_tracker.portfolio
def get_account_from_broker(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
return ib_account
def set_metrics_tracker(self, metrics_tracker):
self.metrics_tracker = metrics_tracker
@property
def account(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
self.metrics_tracker.override_account_fields(
settled_cash=float(ib_account['CashBalance']),
accrued_interest=float(ib_account['AccruedCash']),
buying_power=float(ib_account['BuyingPower']),
equity_with_loan=float(ib_account['EquityWithLoanValue']),
total_positions_value=float(ib_account['StockMarketValue']),
total_positions_exposure=float(
(float(ib_account['StockMarketValue']) /
(float(ib_account['StockMarketValue']) +
float(ib_account['TotalCashValue'])))),
regt_equity=float(ib_account['RegTEquity']),
regt_margin=float(ib_account['RegTMargin']),
initial_margin_requirement=float(
ib_account['FullInitMarginReq']),
maintenance_margin_requirement=float(
ib_account['FullMaintMarginReq']),
available_funds=float(ib_account['AvailableFunds']),
excess_liquidity=float(ib_account['ExcessLiquidity']),
cushion=float(
self._tws.accounts[self.account_id]['']['Cushion']),
day_trades_remaining=float(
self._tws.accounts[self.account_id]['']['DayTradesRemaining']),
leverage=float(
self._tws.accounts[self.account_id]['']['Leverage-S']),
net_leverage=(
float(ib_account['StockMarketValue']) /
(float(ib_account['TotalCashValue']) +
float(ib_account['StockMarketValue']))),
net_liquidation=float(ib_account['NetLiquidation'])
)
return self.metrics_tracker.account
@property
def time_skew(self):
return self._tws.time_skew
def is_alive(self):
return not self._tws.unrecoverable_error
@staticmethod
def _safe_symbol_lookup(symbol):
try:
return symbol_lookup(symbol)
except SymbolNotFound:
return None
_zl_order_ref_magic = '!ZL'
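    # The order ref encodes the order parameters so they can later be recovered
    # from TWS. Illustrative example (hypothetical values): an order to BUY 100
    # SPY at a 5.0 limit placed at epoch 1530000000 is encoded by
    # _create_order_ref as
    #     "A:BUY Q:100 T:LMT L:5.0 S:0 D:1530000000 !ZL"
    # and _parse_order_ref turns that string back into a dict of those fields.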
@classmethod
    def _create_order_ref(cls, ib_order, dt=None):
        # evaluate the timestamp at call time, not once at class definition time
        dt = dt if dt is not None else pd.to_datetime('now', utc=True)
        order_type = ib_order.m_orderType.replace(' ', '_')
return \
"A:{action} Q:{qty} T:{order_type} " \
"L:{limit_price} S:{stop_price} D:{date} {magic}".format(
action=ib_order.m_action,
qty=ib_order.m_totalQuantity,
order_type=order_type,
limit_price=ib_order.m_lmtPrice,
stop_price=ib_order.m_auxPrice,
date=int(dt.value / 1e9),
magic=cls._zl_order_ref_magic)
@classmethod
def _parse_order_ref(cls, ib_order_ref):
if not ib_order_ref or \
not ib_order_ref.endswith(cls._zl_order_ref_magic):
return None
try:
action, qty, order_type, limit_price, stop_price, dt, _ = \
ib_order_ref.split(' ')
if not all(
[action.startswith('A:'),
qty.startswith('Q:'),
order_type.startswith('T:'),
limit_price.startswith('L:'),
stop_price.startswith('S:'),
dt.startswith('D:')]):
return None
return {
'action': action[2:],
'qty': int(qty[2:]),
'order_type': order_type[2:].replace('_', ' '),
'limit_price': float(limit_price[2:]),
'stop_price': float(stop_price[2:]),
'dt': pd.to_datetime(dt[2:], unit='s', utc=True)}
except ValueError:
log.warning("Error parsing order metadata: {}".format(
ib_order_ref))
return None
def order(self, asset, amount, style):
contract = Contract()
contract.m_symbol = str(asset.symbol)
contract.m_currency = self.currency
contract.m_exchange = symbol_to_exchange[str(asset.symbol)]
contract.m_secType = symbol_to_sec_type[str(asset.symbol)]
order = Order()
order.m_totalQuantity = int(fabs(amount))
order.m_action = "BUY" if amount > 0 else "SELL"
is_buy = (amount > 0)
order.m_lmtPrice = style.get_limit_price(is_buy) or 0
order.m_auxPrice = style.get_stop_price(is_buy) or 0
if isinstance(style, MarketOrder):
order.m_orderType = "MKT"
elif isinstance(style, LimitOrder):
order.m_orderType = "LMT"
elif isinstance(style, StopOrder):
order.m_orderType = "STP"
elif isinstance(style, StopLimitOrder):
order.m_orderType = "STP LMT"
# TODO: Support GTC orders both here and at blotter_live
order.m_tif = "DAY"
order.m_orderRef = self._create_order_ref(order)
ib_order_id = self._tws.next_order_id
zp_order = self._get_or_create_zp_order(ib_order_id, order, contract)
log.info(
"Placing order-{order_id}: "
"{action} {qty} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price} {tif}".format(
order_id=ib_order_id,
action=order.m_action,
qty=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice,
tif=order.m_tif
))
self._tws.placeOrder(ib_order_id, contract, order)
return zp_order
@property
def orders(self):
self._update_orders()
return self._orders
def _ib_to_zp_order_id(self, ib_order_id):
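        # returns an id like "IB-2021-06-01-DU123456-7-42" (hypothetical values)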
return "IB-{date}-{account_id}-{client_id}-{order_id}".format(
date=str(datetime.datetime.now(pytz.timezone(self.timezone)).date()),
account_id=self.account_id,
client_id=self._tws.client_id,
order_id=ib_order_id)
@staticmethod
def _action_qty_to_amount(action, qty):
return qty if action == 'BUY' else -1 * qty
def _get_or_create_zp_order(self, ib_order_id,
ib_order=None, ib_contract=None):
zp_order_id = self._ib_to_zp_order_id(ib_order_id)
if zp_order_id in self._orders:
return self._orders[zp_order_id]
# Try to reconstruct the order from the given information:
# open order state and execution state
symbol, order_details = None, None
if ib_order and ib_contract:
symbol = ib_contract.m_symbol
order_details = self._parse_order_ref(ib_order.m_orderRef)
if not order_details and ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]
symbol = open_order['contract'].m_symbol
order_details = self._parse_order_ref(
open_order['order'].m_orderRef)
if not order_details and ib_order_id in self._tws.executions:
executions = self._tws.executions[ib_order_id]
last_exec_detail = list(executions.values())[-1]['exec_detail']
last_exec_contract = list(executions.values())[-1]['contract']
symbol = last_exec_contract.m_symbol
order_details = self._parse_order_ref(last_exec_detail.m_orderRef)
asset = self._safe_symbol_lookup(symbol)
if not asset:
log.warning(
"Ignoring symbol {symbol} which has associated "
"order but it is not registered in bundle".format(
symbol=symbol))
return None
if order_details:
amount = self._action_qty_to_amount(order_details['action'],
order_details['qty'])
stop_price = order_details['stop_price']
limit_price = order_details['limit_price']
dt = order_details['dt']
else:
dt = pd.to_datetime('now', utc=True)
amount, stop_price, limit_price = 0, None, None
if ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]['order']
amount = self._action_qty_to_amount(
open_order.m_action, open_order.m_totalQuantity)
stop_price = open_order.m_auxPrice
limit_price = open_order.m_lmtPrice
stop_price = None if stop_price == 0 else stop_price
limit_price = None if limit_price == 0 else limit_price
self._orders[zp_order_id] = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id)
self._orders[zp_order_id].broker_order_id = ib_order_id
return self._orders[zp_order_id]
@staticmethod
def _ib_to_zp_status(ib_status):
ib_status = ib_status.lower()
if ib_status == 'submitted':
return ZP_ORDER_STATUS.OPEN
elif ib_status in ('pendingsubmit',
'pendingcancel',
'presubmitted'):
return ZP_ORDER_STATUS.HELD
elif ib_status == 'cancelled':
return ZP_ORDER_STATUS.CANCELLED
elif ib_status == 'filled':
return ZP_ORDER_STATUS.FILLED
elif ib_status == 'inactive':
return ZP_ORDER_STATUS.REJECTED
else:
return None
def _update_orders(self):
def _update_from_order_status(zp_order, ib_order_id):
if ib_order_id in self._tws.open_orders:
open_order_state = self._tws.open_orders[ib_order_id]['state']
zp_status = self._ib_to_zp_status(open_order_state.m_status)
if zp_status is None:
log.warning(
"Order-{order_id}: "
"unknown order status: {order_status}.".format(
order_id=ib_order_id,
order_status=open_order_state.m_status))
else:
zp_order.status = zp_status
if ib_order_id in self._tws.order_statuses:
order_status = self._tws.order_statuses[ib_order_id]
zp_order.filled = order_status['filled']
zp_status = self._ib_to_zp_status(order_status['status'])
if zp_status:
zp_order.status = zp_status
else:
log.warning("Order-{order_id}: "
"unknown order status: {order_status}."
.format(order_id=ib_order_id,
order_status=order_status['status']))
def _update_from_execution(zp_order, ib_order_id):
if ib_order_id in self._tws.executions and \
ib_order_id not in self._tws.open_orders:
zp_order.status = ZP_ORDER_STATUS.FILLED
executions = self._tws.executions[ib_order_id]
last_exec_detail = \
list(executions.values())[-1]['exec_detail']
zp_order.filled = last_exec_detail.m_cumQty
all_ib_order_ids = (set([e.broker_order_id
for e in self._orders.values()]) |
set(self._tws.open_orders.keys()) |
set(self._tws.order_statuses.keys()) |
set(self._tws.executions.keys()) |
set(self._tws.commissions.keys()))
for ib_order_id in all_ib_order_ids:
zp_order = self._get_or_create_zp_order(ib_order_id)
if zp_order:
_update_from_execution(zp_order, ib_order_id)
_update_from_order_status(zp_order, ib_order_id)
@property
def transactions(self):
self._update_transactions()
return self._transactions
def _update_transactions(self):
all_orders = list(self.orders.values())
for ib_order_id, executions in iteritems(self._tws.executions):
orders = [order
for order in all_orders
if order.broker_order_id == ib_order_id]
if not orders:
log.warning("No order found for executions: {}".format(
executions))
continue
assert len(orders) == 1
order = orders[0]
for exec_id, execution in iteritems(executions):
if exec_id in self._transactions:
continue
try:
commission = self._tws.commissions[ib_order_id][exec_id] \
.m_commission
except KeyError:
log.warning(
"Commission not found for execution: {}".format(
exec_id))
commission = 0
exec_detail = execution['exec_detail']
is_buy = order.amount > 0
amount = (exec_detail.m_shares if is_buy
else -1 * exec_detail.m_shares)
tx = Transaction(
asset=order.asset,
amount=amount,
                    dt=pd.to_datetime(exec_detail.m_time, utc=True)
#!/usr/bin/env python
# coding: utf-8
# # BCG Gamma Challenge
# # Libraries
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
# In[2]:
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# # Dataset
# In[3]:
df_municipios_2015 = pd.read_csv('../bcggammachallenge/municipios/municipios20150101.csv')
# In[4]:
df_municipios_2016 = pd.read_csv('../bcggammachallenge/municipios/municipios20160101.csv')
# Copyright 2017-2021 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import pandas as pd
import numpy as np
import time
import requests
import json
import math
from moonshot.slippage import FixedSlippage
from moonshot.mixins import WeightAllocationMixin
from moonshot.cache import Cache
from moonshot.exceptions import MoonshotError, MoonshotParameterError
from quantrocket.price import get_prices
from quantrocket.master import list_calendar_statuses, download_master_file
from quantrocket.account import download_account_balances, download_exchange_rates
from quantrocket.blotter import list_positions, download_order_statuses
class Moonshot(
WeightAllocationMixin):
"""
Base class for Moonshot strategies.
To create a strategy, subclass this class. Implement your trading logic in the class
methods, and store your strategy parameters as class attributes.
Class attributes include built-in Moonshot parameters which you can override, as well
as your own custom parameters.
To run a backtest, at minimum you must implement `prices_to_signals`, but in general you will
want to implement the following methods (which are called in the order shown):
`prices_to_signals` -> `signals_to_target_weights` -> `target_weights_to_positions` -> `positions_to_gross_returns`
To trade (i.e. generate orders intended to be placed, but actually placed by other services
than Moonshot), you must also implement `order_stubs_to_orders`. Order generation for trading
follows the path shown below:
`prices_to_signals` -> `signals_to_target_weights` -> `order_stubs_to_orders`
Parameters
----------
CODE : str, required
the strategy code
DB : str, required
code of db to pull data from
DB_FIELDS : list of str, optional
fields to retrieve from db (defaults to ["Open", "Close", "Volume"])
DB_TIMES : list of str (HH:MM:SS), optional
for intraday databases, only retrieve these times
DB_DATA_FREQUENCY : str, optional
Only applicable when DB specifies a Zipline bundle. Whether to query minute or
daily data. If omitted, defaults to minute data for minute bundles and to daily
data for daily bundles. This parameter only needs to be set to request daily data
from a minute bundle. Possible choices: daily, minute (or aliases d, m).
SIDS : list of str, optional
limit db query to these sids
UNIVERSES : list of str, optional
limit db query to these universes
EXCLUDE_SIDS : list of str, optional
exclude these sids from db query
EXCLUDE_UNIVERSES : list of str, optional
exclude these universes from db query
CONT_FUT : str, optional
pass this cont_fut option to db query (default None)
LOOKBACK_WINDOW : int, optional
get this many days additional data prior to the backtest start date or
trade date to account for rolling windows. If set to None (the default),
will use the largest value of any attributes ending with `*_WINDOW`, or
252 if no such attributes, and will further pad window based on any
`*_INTERVAL` attributes, which are interpreted as pandas offset aliases
(for example `REBALANCE_INTERVAL = 'Q'`). Set to 0 to disable.
NLV : dict, optional
dict of currency:NLV for each currency represented in the strategy. Can
alternatively be passed directly to backtest method.
COMMISSION_CLASS : Class or dict of (sectype,exchange,currency):Class, optional
the commission class to use. If strategy includes a mix of security types,
exchanges, or currencies, you can pass a dict mapping tuples of
(sectype,exchange,currency) to the different commission classes. By default
no commission is applied.
SLIPPAGE_CLASSES : iterable of slippage classes, optional
one or more slippage classes. By default no slippage is applied.
SLIPPAGE_BPS : float, optional
        amount of one-way slippage to apply to each trade in BPS (for example, enter 5 to deduct
5 BPS)
BENCHMARK : str, optional
the sid of a security in the historical data to use as the benchmark
BENCHMARK_DB : str, optional
the database containing the benchmark, if different from DB. BENCHMARK_DB
should contain end-of-day data, not intraday (but can be used with intraday
backtests).
BENCHMARK_TIME : str (HH:MM:SS), optional
use prices from this time of day as benchmark prices. Only applicable if
benchmark prices originate in DB (not BENCHMARK_DB), DB contains intraday
data, and backtest results are daily.
TIMEZONE : str, optional
convert timestamps to this timezone (if not provided, will be inferred
from securities universe if possible)
CALENDAR : str, optional
use this exchange's trading calendar to determine which date's signals
should be used for live trading. If the exchange is currently open,
today's signals will be used. If currently closed, the signals corresponding
to the last date the exchange was open will be used. If no calendar is specified,
today's signals will be used.
POSITIONS_CLOSED_DAILY : bool
if True, positions in backtests that fall on adjacent days are assumed to
be closed out and reopened each day rather than held continuously; this
impacts commission and slippage calculations (default is False, meaning
adjacent positions are assumed to be held continuously)
ALLOW_REBALANCE : bool or float
in live trading, whether to allow rebalancing of existing positions that
are already on the correct side. If True (the default), allow rebalancing.
If False, no rebalancing. If set to a positive decimal, allow rebalancing
only when the existing position differs from the target position by at least
this percentage. For example 0.5 means don't rebalance a position unless
the position will change by +/-50%.
CONTRACT_VALUE_REFERENCE_FIELD : str, optional
the price field to use for determining contract values for the purpose of
applying commissions and constraining weights in backtests and calculating
order quantities in trading. Defaults to the first available of Close, Open,
MinuteCloseClose, SecondCloseClose, LastPriceClose, BidPriceClose, AskPriceClose,
TimeSalesLastPriceClose, TimeSalesFilteredLastPriceClose, LastPriceMean,
BidPriceMean, AskPriceMean, TimeSalesLastPriceMean, TimeSalesFilteredLastPriceMean,
MinuteOpenOpen, SecondOpenOpen, LastPriceOpen, BidPriceOpen, AskPriceOpen,
TimeSalesLastPriceOpen, TimeSalesFilteredLastPriceOpen.
ACCOUNT_BALANCE_FIELD : str or list of str, optional
the account field to use for calculating order quantities as a percentage of
account equity. Applies to trading only, not backtesting. Default is
NetLiquidation. If a list of fields is provided, the minimum value is used.
For example, ['NetLiquidation', 'PreviousEquity'] means to use the lesser of
NetLiquidation or PreviousEquity to determine order quantities.
Examples
--------
Example of a minimal strategy that runs on a history db called "mexi-stk-1d" and buys when
the securities are above their 200-day moving average:
    >>> class MexicoMovingAverage(Moonshot):
>>>
>>> CODE = "mexi-ma"
>>> DB = "mexi-stk-1d"
>>> MAVG_WINDOW = 200
>>>
>>> def prices_to_signals(self, prices):
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(self.MAVG_WINDOW).mean()
>>> signals = closes > mavgs.shift()
>>> return signals.astype(int)
"""
CODE = None
DB = None
DB_FIELDS = ["Open", "Close", "Volume"]
DB_TIMES = None
DB_DATA_FREQUENCY = None
SIDS = None
UNIVERSES = None
EXCLUDE_SIDS = None
EXCLUDE_UNIVERSES = None
CONT_FUT = None
LOOKBACK_WINDOW = None
NLV = None
COMMISSION_CLASS = None
SLIPPAGE_CLASSES = ()
SLIPPAGE_BPS = 0
BENCHMARK = None
BENCHMARK_DB = None
BENCHMARK_TIME = None
TIMEZONE = None
CALENDAR = None
POSITIONS_CLOSED_DAILY = False
ALLOW_REBALANCE = True
CONTRACT_VALUE_REFERENCE_FIELD = None
ACCOUNT_BALANCE_FIELD = None
def __init__(self):
self.is_trade = False
self.review_date = None # see trade() docstring
self.is_backtest = False
self._securities_master = None
self._backtest_results = {}
self._inferred_timezone = None
self._signal_date = None # set by _weights_to_today_weights
self._signal_time = None # set by _weights_to_today_weights
def prices_to_signals(self, prices):
"""
From a DataFrame of prices, return a DataFrame of signals. By convention,
signals should be 1=long, 0=cash, -1=short.
Must be implemented by strategy subclasses.
Parameters
----------
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
signals
Examples
--------
Buy when the close is above yesterday's 50-day moving average:
>>> def prices_to_signals(self, prices):
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(50).mean()
>>> signals = closes > mavgs.shift()
>>> return signals.astype(int)
"""
raise NotImplementedError("strategies must implement prices_to_signals")
def signals_to_target_weights(self, signals, prices):
"""
From a DataFrame of signals, return a DataFrame of target weights.
Whereas signals indicate the direction of the trades, weights
indicate both the direction and size. For example, -0.5 means a short
position equal to 50% of the equity allocated to the strategy.
Weights are used to help create orders in live trading, and to help
simulate executed positions in backtests.
        The default implementation of this method evenly divides allocated
capital among the signals each period, but it is intended to be
overridden by strategy subclasses.
A variety of built-in weight allocation algorithms are provided by
and documented under `moonshot.mixins.WeightAllocationMixin`.
Parameters
----------
signals : DataFrame, required
a DataFrame of signals
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame
of price/market data
Returns
-------
DataFrame
weights
Examples
--------
The default implementation is shown below:
>>> def signals_to_target_weights(self, signals, prices):
>>> weights = self.allocate_equal_weights(signals) # provided by moonshot.mixins.WeightAllocationMixin
>>> return weights
"""
weights = self.allocate_equal_weights(signals)
return weights
def target_weights_to_positions(self, weights, prices):
"""
From a DataFrame of target weights, return a DataFrame of simulated
positions.
The positions should shift the weights based on when the weights
would be filled in live trading.
        By default, assumes the positions are taken in the period after the
weights were allocated. Intended to be overridden by strategy
subclasses.
Parameters
----------
weights : DataFrame, required
a DataFrame of weights
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
positions
Examples
--------
        The default implementation is shown below (enter the position in the period after
signal generation/weight allocation):
>>> def target_weights_to_positions(self, weights, prices):
>>> positions = weights.shift()
>>> return positions
"""
positions = weights.shift()
return positions
def positions_to_gross_returns(self, positions, prices):
"""
From a DataFrame of positions, return a DataFrame of returns before
commissions and slippage.
        By default, assumes entry at the close of the period the position is
taken and calculates the return through the following period's close.
Intended to be overridden by strategy subclasses.
Parameters
----------
positions : DataFrame, required
a DataFrame of positions
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
gross returns
Examples
--------
The default implementation is shown below:
>>> def positions_to_gross_returns(self, positions, prices):
>>> closes = prices.loc["Close"]
>>> gross_returns = closes.pct_change() * positions.shift()
>>> return gross_returns
"""
closes = prices.loc["Close"]
gross_returns = closes.pct_change() * positions.shift()
return gross_returns
def order_stubs_to_orders(self, orders, prices):
"""
From a DataFrame of order stubs, creates a DataFrame of fully
specified orders.
Parameters
----------
orders : DataFrame
a DataFrame of order stubs, with columns Sid, Account, Action,
OrderRef, and TotalQuantity
prices : DataFrame
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
a DataFrame of fully specified orders, with (at minimum) columns
Exchange, Tif, OrderType added
Examples
--------
The orders DataFrame provided to this method resembles the following:
>>> print(orders)
Sid Account Action OrderRef TotalQuantity
0 12345 U12345 SELL my-strategy 100
1 12345 U55555 SELL my-strategy 50
2 23456 U12345 BUY my-strategy 100
3 23456 U55555 BUY my-strategy 50
4 34567 U12345 BUY my-strategy 200
5 34567 U55555 BUY my-strategy 100
        The default implementation creates MKT DAY orders and is
shown below:
>>> def order_stubs_to_orders(self, orders, prices):
>>> orders["OrderType"] = "MKT"
>>> orders["Tif"] = "DAY"
>>> return orders
Set a limit price equal to the prior closing price:
>>> closes = prices.loc["Close"]
>>> prior_closes = closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
>>> orders["OrderType"] = "LMT"
>>> orders["LmtPrice"] = prior_closes
"""
orders["OrderType"] = "MKT"
orders["Tif"] = "DAY"
return orders
def reindex_like_orders(self, df, orders):
"""
Reindexes a DataFrame (having Sids as columns and dates as index)
to match the shape of the orders DataFrame.
Parameters
----------
df : DataFrame, required
a DataFrame of arbitrary values with Sids as columns and
dates as index
orders : DataFrame, required
an orders DataFrame with a Sid column
Returns
-------
Series
a Series with an index matching orders
Examples
--------
Calculate prior closes (assuming daily bars) and reindex like
orders:
>>> closes = prices.loc["Close"]
>>> prior_closes = closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
Calculate prior closes (assuming 30-min bars) and reindex like
orders:
>>> session_closes = prices.loc["Close"].xs("15:30:00", level="Time")
>>> prior_closes = session_closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
"""
df = df.loc[self._signal_date]
if "Time" in df.index.names:
if not self._signal_time:
raise MoonshotError(
"cannot reindex DataFrame like orders because DataFrame contains "
"'Time' in index, please take a cross-section first, for example: "
"`my_dataframe.xs('15:45:00', level='Time')`")
df = df.loc[self._signal_time]
df.name = "_MoonshotOther"
df = orders.join(df, on="Sid")._MoonshotOther
df.name = None
return df
def orders_to_child_orders(self, orders):
"""
From a DataFrame of orders, returns a DataFrame of child orders
(bracket orders) to be submitted if the parent orders fill.
An OrderId column will be added to the orders DataFrame, and child
orders will be linked to it via a ParentId column. The Action
(BUY/SELL) will be reversed on the child orders but otherwise the
child orders will be identical to the parent orders.
Parameters
----------
orders : DataFrame, required
an orders DataFrame
Returns
-------
DataFrame
a DataFrame of child orders
Examples
--------
>>> orders.head()
Sid Action TotalQuantity Exchange OrderType Tif
0 12345 BUY 200 SMART MKT Day
1 23456 BUY 400 SMART MKT Day
>>> child_orders = self.orders_to_child_orders(orders)
>>> child_orders.loc[:, "OrderType"] = "MOC"
>>> orders = pd.concat([orders,child_orders])
>>> orders.head()
Sid Action TotalQuantity Exchange OrderType Tif OrderId ParentId
0 12345 BUY 200 SMART MKT Day 0 NaN
1 23456 BUY 400 SMART MKT Day 1 NaN
0 12345 SELL 200 SMART MOC Day NaN 0
1 23456 SELL 400 SMART MOC Day NaN 1
"""
if "OrderId" not in orders.columns:
orders["OrderId"] = orders.index.astype(str) + ".{0}".format(time.time())
child_orders = orders.copy()
child_orders.rename(columns={"OrderId":"ParentId"}, inplace=True)
child_orders.loc[orders.Action=="BUY", "Action"] = "SELL"
child_orders.loc[orders.Action=="SELL", "Action"] = "BUY"
return child_orders
def _quantities_to_order_stubs(self, quantities):
"""
From a DataFrame of quantities to be ordered (with Sids as index,
Accounts as columns), returns a DataFrame of order stubs.
quantities in:
Account U12345 U55555
Sid
12345 -100 -50
23456 100 50
34567 200 100
order_stubs out:
Sid Account Action OrderRef TotalQuantity
0 12345 U12345 SELL my-strategy 100
1 12345 U55555 SELL my-strategy 50
2 23456 U12345 BUY my-strategy 100
3 23456 U55555 BUY my-strategy 50
4 34567 U12345 BUY my-strategy 200
5 34567 U55555 BUY my-strategy 100
"""
quantities.index.name = "Sid"
quantities.columns.name = "Account"
quantities = quantities.stack()
quantities.name = "Quantity"
order_stubs = quantities.to_frame().reset_index()
order_stubs["Action"] = np.where(order_stubs.Quantity > 0, "BUY", "SELL")
order_stubs = order_stubs.loc[order_stubs.Quantity != 0].copy()
order_stubs["OrderRef"] = self.CODE
order_stubs["TotalQuantity"] = order_stubs.Quantity.abs()
order_stubs = order_stubs.drop("Quantity",axis=1)
return order_stubs
def _get_nlv(self):
"""
Return a dict of currency:NLV for each currency in the strategy. By
default simply returns the NLV class attribute.
"""
return self.NLV
def _positions_to_turnover(self, positions):
"""
Given a dataframe of positions, returns a dataframe of turnover. 0
indicates no turnover; 1 indicates going from 100% short to cash or
cash to 100% long (for example), and vice versa; and 2 indicates
        going from 100% short to 100% long (for example).
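        For example, with POSITIONS_CLOSED_DAILY = False, a daily position series
        of [0, 1.0, 1.0, -0.5] produces turnover of [NaN, 1.0, 0.0, 1.5].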
"""
# Intraday trades are opened and closed each day there's a position,
# so the turnover is twice the positions.
if self.POSITIONS_CLOSED_DAILY:
turnover = positions * 2
else:
turnover = positions.fillna(0).diff()
return turnover.abs()
def _weights_to_today_weights(self, weights, prices):
"""
From a DataFrame of target weights, extract the row that contains the
weights that should be used for today's trading. Returns a Series of
weights by sid:
Sid
12345 -0.2
23456 0
34567 0.1
The date whose weights are selected is usually today, but if CALENDAR
is used and the market is closed it will be the date when the market
closed. Can also be overridden by review_date.
For intraday strategies, the time whose weights are selected is the
latest time that is earlier than the time at which the strategy is
running.
"""
# First, get the signal date
# Use review_date if set
if self.review_date:
dt = pd.Timestamp(self.review_date)
# Else use trading calendar if provided
elif self.CALENDAR:
status = list_calendar_statuses([self.CALENDAR])[self.CALENDAR]
            # If the exchange is closed, the signals should correspond to the
# date the exchange was last open
if status["status"] == "closed":
dt = pd.Timestamp(status["since"])
# If the exchange is open, the signals should correspond to
# today's date
else:
dt = pd.Timestamp.now(tz=status["timezone"])
# If no trading calendar, use today's date (in strategy timezone)
else:
tz = self.TIMEZONE or self._inferred_timezone
dt = pd.Timestamp.now(tz=tz)
# Keep only the date as the signal_date
self._signal_date = pd.Timestamp(dt.date())
# extract the current time (or review date time)
trade_time = dt.strftime("%H:%M:%S")
weights_is_intraday = "Time" in weights.index.names
try:
today_weights = weights.loc[self._signal_date]
except KeyError:
if weights_is_intraday:
max_date = weights.index.get_level_values("Date").max()
else:
max_date = weights.index.max()
msg = ("expected signal date {0} not found in target weights DataFrame, "
"is the underlying data up-to-date? (max date is {1})")
if not self.CALENDAR and not weights_is_intraday and self._signal_date.date() - max_date.date() == pd.Timedelta(days=1):
msg += (" If your strategy trades before the open and {0} data "
"is not expected, try setting CALENDAR = <exchange>")
raise MoonshotError(msg.format(
self._signal_date.date().isoformat(),
max_date.date().isoformat()))
if not weights_is_intraday:
print("using target weights for {0} to create orders".format(self._signal_date.date().isoformat()))
return today_weights
# For intraday strategies, select the weights from the latest time
# that is earlier than the trade time. Note that we select the
# expected time from the entire weights DataFrame, which will result
# in a failure if that time is missing for the trade date
unique_times = weights.index.get_level_values("Time").unique()
self._signal_time = unique_times[unique_times < trade_time].max()
if pd.isnull(self._signal_time):
msg = (
"cannot determine which target weights to use for orders because "
"target weights DataFrame contains no times earlier than trade time {0} "
"for signal date {1}".format(
trade_time,
self._signal_date.date().isoformat()))
if self.review_date:
msg += ", please adjust the review_date"
raise MoonshotError(msg)
# get_prices inserts all times into each day's index, thus
# the signal_time will be in the weights DataFrame even if the data
# is stale. Instead, to validate the data, we make sure that there is
# at least one nonnull field in the prices DataFrame at the
# signal_time on the signal_date
today_prices = prices.xs(self._signal_date, level="Date")
notnull_today_prices = today_prices[today_prices.notnull().any(axis=1)]
try:
no_signal_time_prices = notnull_today_prices.xs(self._signal_time, level="Time").empty
except KeyError:
no_signal_time_prices = True
if no_signal_time_prices:
msg = ("no {0} data found in prices DataFrame for signal date {1}, "
"is the underlying data up-to-date? (max time for {1} "
"is {2})")
notnull_max_date = notnull_today_prices.iloc[-1].name[-1]
raise MoonshotError(msg.format(
self._signal_time,
self._signal_date.date().isoformat(),
notnull_max_date))
today_weights = today_weights.loc[self._signal_time]
print("using target weights for {0} at {1} to create orders".format(
self._signal_date.date().isoformat(),
self._signal_time))
return today_weights
def _get_commissions(self, positions, prices):
"""
Returns the commissions to be subtracted from the returns.
"""
if not self.COMMISSION_CLASS:
return pd.DataFrame(0, index=positions.index, columns=positions.columns)
turnover = self._positions_to_turnover(positions)
contract_values = self._get_contract_values(prices)
prices_is_intraday = "Time" in prices.index.names
positions_is_intraday = "Time" in positions.index.names
if prices_is_intraday and not positions_is_intraday:
contract_values = contract_values.groupby(
contract_values.index.get_level_values("Date")).first()
fields = prices.index.get_level_values("Field").unique()
if "Nlv" in self._securities_master.columns:
nlvs = contract_values.apply(lambda x: self._securities_master.Nlv, axis=1)
else:
nlvs = None
# handle the case of only one commission class
if not isinstance(self.COMMISSION_CLASS, dict):
commissions = self.COMMISSION_CLASS.get_commissions(contract_values, turnover=turnover, nlvs=nlvs)
return commissions
# handle multiple commission classes per sectype/exchange/currency
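        # e.g. (hypothetical commission classes) COMMISSION_CLASS = {
        #     ("STK", "NYSE", "USD"): DemoUSStockCommission,
        #     ("FUT", "CME", "USD"): DemoCMEFuturesCommission}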
# first, tuple-ize the dict keys in case they are lists
commission_classes = {}
for sec_group, commission_cls in self.COMMISSION_CLASS.items():
commission_classes[tuple(sec_group)] = commission_cls
defined_sec_groups = set([tuple(k) for k in commission_classes.keys()])
# Reindex master fields like contract_values
sec_types = contract_values.apply(lambda x: self._securities_master.SecType, axis=1)
exchanges = contract_values.apply(lambda x: self._securities_master.Exchange, axis=1)
currencies = contract_values.apply(lambda x: self._securities_master.Currency, axis=1)
required_sec_groups = set([
tuple(s.split("|")) for s in (sec_types+"|"+exchanges+"|"+currencies).iloc[-1].unique()])
missing_sec_groups = required_sec_groups - defined_sec_groups
if missing_sec_groups:
raise MoonshotParameterError("expected a commission class for each combination of (sectype,exchange,currency) "
"but none is defined for {0}".format(
", ".join(["({0})".format(",".join(t)) for t in missing_sec_groups])))
all_commissions = pd.DataFrame(None, index=positions.index, columns=positions.columns)
for sec_group in required_sec_groups:
commission_cls = commission_classes[sec_group]
sec_type, exchange, currency = sec_group
sec_group_commissions = commission_cls.get_commissions(
contract_values, turnover=turnover, nlvs=nlvs)
in_sec_group = (sec_types == sec_type) & (exchanges == exchange) & (currencies == currency)
all_commissions = sec_group_commissions.where(in_sec_group, all_commissions)
return all_commissions
def _get_slippage(self, positions, prices):
"""
Returns the slippage to be subtracted from the returns.
"""
turnover = self._positions_to_turnover(positions)
slippage = pd.DataFrame(0, index=turnover.index, columns=turnover.columns)
slippage_classes = self.SLIPPAGE_CLASSES or ()
if not isinstance(slippage_classes, (list, tuple)):
slippage_classes = [slippage_classes]
for slippage_class in slippage_classes:
slippage += slippage_class().get_slippage(turnover, positions, prices)
if self.SLIPPAGE_BPS:
slippage += FixedSlippage(self.SLIPPAGE_BPS/10000.0).get_slippage(turnover, positions, prices)
return slippage.fillna(0)
def _constrain_weights(self, weights, prices):
"""
Constrains the weights by the quantity constraints defined in
limit_position_sizes.
"""
max_quantities_for_longs, max_quantities_for_shorts = self.limit_position_sizes(prices)
if max_quantities_for_longs is None and max_quantities_for_shorts is None:
return weights
if "Nlv" not in self._securities_master.columns:
raise MoonshotParameterError("must provide NLVs if using limit_position_sizes")
contract_values = self._get_contract_values(prices)
contract_values = contract_values.fillna(method="ffill")
nlvs_in_trade_currency = contract_values.apply(lambda x: self._securities_master.Nlv, axis=1)
prices_is_intraday = "Time" in prices.index.names
weights_is_intraday = "Time" in weights.index.names
if prices_is_intraday and not weights_is_intraday:
# we somewhat arbitrarily pick the contract value as of the
# earliest time of day; this contract value might be somewhat
# stale but it avoids the possible lookahead bias of using, say,
# the contract value as of the latest time of day. We could ask
# the user to supply a time but that is rather clunky.
earliest_time = prices.index.get_level_values("Time").unique().min()
contract_values = contract_values.xs(earliest_time, level="Time")
nlvs_in_trade_currency = nlvs_in_trade_currency.xs(earliest_time, level="Time")
# Convert weights to quantities
trade_values_in_trade_currency = weights * nlvs_in_trade_currency
# Note: we take abs() of contract_values because combos can have
# negative prices which would invert the sign of the trade
quantities = trade_values_in_trade_currency / contract_values.where(contract_values != 0).abs()
quantities = quantities.round().fillna(0).astype(int)
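        # Illustrative arithmetic (hypothetical values): a 0.10 weight against a
        # 1,000,000 NLV and a 50.00 contract value implies 100,000 / 50 = 2,000 shares.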
# Constrain quantities
if max_quantities_for_longs is not None:
max_quantities_for_longs = max_quantities_for_longs.abs()
quantities = max_quantities_for_longs.where(
quantities > max_quantities_for_longs, quantities)
if max_quantities_for_shorts is not None:
max_quantities_for_shorts = -max_quantities_for_shorts.abs()
quantities = max_quantities_for_shorts.where(
quantities < max_quantities_for_shorts, quantities)
# Convert quantities back to weights
target_trade_values_in_trade_currency = quantities * contract_values
weights = target_trade_values_in_trade_currency / nlvs_in_trade_currency
return weights
def limit_position_sizes(self, prices):
"""
This method should return a tuple of DataFrames::
return max_quantities_for_longs, max_quantities_for_shorts
where the DataFrames define the maximum number of shares/contracts
that can be held long and short, respectively. Maximum limits might
be based on available liquidity (recent volume), shortable shares
available, etc.
The shape and alignment of the returned DataFrames should match that of the
target_weights returned by `signals_to_target_weights`. Target weights will be
reduced, if necessary, based on max_quantities_for_longs and max_quantities_for_shorts.
Return None for one or both DataFrames to indicate "no limits."
For example to limit shorts but not longs::
return None, max_quantities_for_shorts
Within a DataFrame, any None or NaNs will be treated as "no limit" for that
particular security and date.
Note that max_quantities_for_shorts can equivalently be represented with
positive or negative numbers. This is OK::
AAPL
2018-05-18 100
2018-05-19 100
This is also OK::
AAPL
2018-05-18 -100
2018-05-19 -100
Both of the above DataFrames would mean: short no more than 100 shares of
AAPL.
Parameters
----------
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
tuple of (DataFrame, DataFrame)
max quantities for long, max quantities for shorts
Examples
--------
Limit quantities to 1% of 15-day average daily volume:
>>> def limit_position_sizes(self, prices):
>>> # assumes end-of-day bars, for intraday bars, use `.xs` to
>>> # select a time of day
>>> volumes = prices.loc["Volume"]
>>> mean_volumes = volumes.rolling(15).mean()
>>> max_shares = (mean_volumes * 0.01).round()
>>> max_quantities_for_longs = max_quantities_for_shorts = max_shares
>>> return max_quantities_for_longs, max_quantities_for_shorts
"""
max_quantities_for_longs = None
max_quantities_for_shorts = None
return max_quantities_for_longs, max_quantities_for_shorts
@classmethod
def _get_lookback_window(cls):
"""
Returns cls.LOOKBACK_WINDOW if set, otherwise infers the lookback
window from `_WINDOW`, defaulting to 252. Then increases the lookback
based on `_INTERVAL` attributes, which are interpreted as pandas
frequencies (for example `REBALANCE_INTERVAL` = 'Q'). This ensures the
lookback is sufficient when resampling to quarterly etc. for periodic
rebalancing.
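        For example (hypothetical attributes), a strategy defining
        MAVG_WINDOW = 200 and REBALANCE_INTERVAL = 'Q' gets a lookback of
        200 trading days plus roughly one quarter (~65) of business days.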
"""
if cls.LOOKBACK_WINDOW is not None:
return cls.LOOKBACK_WINDOW
window_attrs = [getattr(cls, attr) for attr in dir(cls) if attr.endswith("_WINDOW")]
windows = [attr for attr in window_attrs if isinstance(attr, int)]
lookback_window = max(windows) if windows else 252
# Add _INTERVAL if any
offset_aliases = [getattr(cls, attr) for attr in dir(cls) if attr.endswith("_INTERVAL")]
intervals = []
for freq in offset_aliases:
if not freq:
continue
try:
periods = pd.date_range(start=pd.to_datetime('today'),
freq=freq, periods=2)
except ValueError:
continue
# Use the period date range to count bdays in period
bdays = len(pd.bdate_range(start=periods[0], end=periods[1]))
intervals.append(bdays)
if intervals:
lookback_window += max(intervals)
return lookback_window
def _load_master_file(self, sids, nlv=None, no_cache=False):
"""
Loads master file from cache or master service.
"""
securities = None
fields = [
"Currency", "Multiplier", "PriceMagnifier",
"Exchange", "SecType", "Symbol", "Timezone"]
if self.is_backtest and not no_cache:
# try to load from cache
securities = Cache.get(sids, prefix="_master")
if securities is None:
# query master
f = io.StringIO()
download_master_file(
f,
sids=sids,
fields=fields)
securities = pd.read_csv(f, index_col="Sid")
if self.is_backtest:
Cache.set(sids, securities, prefix="_master")
if not self.TIMEZONE:
timezones = securities.Timezone.unique()
if len(timezones) > 1:
raise MoonshotParameterError(
"cannot infer timezone because multiple timezones are present "
"in data, please specify TIMEZONE explicitly (timezones: {0})".format(
", ".join(timezones)))
self._inferred_timezone = timezones[0]
# Append NLV if applicable
nlvs = nlv or self._get_nlv()
if nlvs:
# For FX, store NLV based on the quote currency (extracted from the Symbol)
# not Currency (100 EUR.USD = 100 EUR, not 100 USD)
currencies = securities.Symbol.astype(str).str.split(".").str[0].where(
securities.SecType=="CASH", securities.Currency)
missing_nlvs = set(currencies) - set(nlvs.keys())
if missing_nlvs:
raise MoonshotParameterError(
"NLV dict is missing values for required currencies: {0}".format(
", ".join(missing_nlvs)))
securities["Nlv"] = currencies.apply(lambda currency: nlvs.get(currency, None))
self._securities_master = securities.sort_index()
@classmethod
def _get_start_date_with_lookback(cls, start_date):
"""
Returns the start_date adjusted to incorporate the LOOKBACK_WINDOW,
plus a buffer. LOOKBACK_WINDOW is measured in trading days, but we
query the db in calendar days. Convert from weekdays (260 per year)
to calendar days, assuming 25 holidays (NYSE has ~9 per year, TSEJ
has ~19), plus a buffer (which varies by window size) to be safe.
"""
lookback_window = cls._get_lookback_window()
days_per_year = 365
weekdays_per_year = 260
max_holidays_per_year = 25
trading_days_per_year = weekdays_per_year - max_holidays_per_year
# Vary the buffer by the window length (for very short windows, the
# user might not want to load too much data so we want to keep the
# buffer reasonably small)
# No window, no buffer
if lookback_window == 0:
buffer = 0
# for window < 1 week, a 2 day buffer (plus the calendar day to
# trading day conversion) will suffice
elif lookback_window <= 5:
buffer = 2
# longer than a week, err on the side of loading ample data
else:
buffer = 10
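        # Worked example: the default 252-day lookback becomes
        # ceil(252 * 365 / 235) + 10 = 402 calendar days of padding.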
start_date = pd.Timestamp(start_date) - pd.Timedelta(
days=math.ceil(lookback_window*days_per_year/trading_days_per_year) + buffer)
return start_date.date().isoformat()
def get_prices(self, start_date, end_date=None, nlv=None, no_cache=False):
"""
Downloads prices from a history db and/or real-time aggregate db.
Downloads security details from the master db.
"""
if start_date:
start_date = self._get_start_date_with_lookback(start_date)
codes = self.DB
if not isinstance(codes, (list, tuple)):
codes = [self.DB]
sids = self.SIDS or []
# Add benchmark sid if needed. It's needed if there is no
# BENCHMARK_DB, and sids or universes are specified (if they're
# not specified, the whole db will be queried, including the
# benchmark)
if (
self.is_backtest
and self.BENCHMARK
and not self.BENCHMARK_DB
and (sids or self.UNIVERSES)
):
sids = list(sids).copy()
sids.append(self.BENCHMARK)
kwargs = dict(
codes=codes,
start_date=start_date,
end_date=end_date,
universes=self.UNIVERSES,
sids=sids,
exclude_universes=self.EXCLUDE_UNIVERSES,
exclude_sids=self.EXCLUDE_SIDS,
times=self.DB_TIMES,
cont_fut=self.CONT_FUT,
fields=self.DB_FIELDS,
timezone=self.TIMEZONE,
data_frequency=self.DB_DATA_FREQUENCY
)
if not self.TIMEZONE:
kwargs["infer_timezone"] = True
prices = None
if self.is_backtest and not no_cache:
# If no end_date is specified (indicating the user wants
# up-to-date history), we don't want to use the cache if the dbs
# were more recently modified (indicating new data collection).
# If there's an end date, we use the cache if possible. (The user
# can use --no-cache to disable cache usage if needed.)
if not end_date:
unless_dbs_modified = {
"services": ["history", "realtime"],
"codes": codes}
else:
unless_dbs_modified = None
# try to load from cache
prices = Cache.get(kwargs, prefix="_history", unless_dbs_modified=unless_dbs_modified)
if prices is None:
prices = get_prices(**kwargs)
if self.is_backtest:
Cache.set(kwargs, prices, prefix="_history")
self._load_master_file(prices.columns.tolist(), nlv=nlv, no_cache=no_cache)
return prices
def _prices_to_signals(self, prices, **kwargs):
"""
Converts a prices DataFrame to a DataFrame of signals. This private
method, which simply calls the user-modified public method
`prices_to_signals`, exists for the benefit of the MoonshotML
subclass, which overrides it.
"""
return self.prices_to_signals(prices)
def backtest(self, start_date=None, end_date=None, nlv=None, allocation=1.0,
label_sids=False, no_cache=False):
"""
Backtest a strategy and return a DataFrame of results.
Parameters
----------
start_date : str (YYYY-MM-DD), optional
the backtest start date (default is to include all history in db)
end_date : str (YYYY-MM-DD), optional
the backtest end date (default is to include all history in db)
nlv : dict
dict of currency:nlv. Should contain a currency:nlv pair for
each currency represented in the strategy
allocation : float
how much to allocate to the strategy
label_sids : bool
replace <Sid> with <Symbol>(<Sid>) in columns in output
            for better readability (default False)
no_cache : bool
don't use cached files even if available. Using cached files speeds
up backtests but may be undesirable if underlying data has changed.
See http://qrok.it/h/mcache to learn more about caching in Moonshot.
Returns
-------
DataFrame
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
backtest results
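
        Examples
        --------
        A minimal sketch (MyStrategy is a hypothetical Moonshot subclass;
        backtests are normally run through the QuantRocket client rather than
        by calling this method directly):

        >>> results = MyStrategy().backtest("2017-01-01", "2018-01-01")
        >>> returns = results.loc["Return"]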
"""
self.is_backtest = True
allocation = allocation or 1.0
prices = self.get_prices(start_date, end_date, nlv=nlv, no_cache=no_cache)
signals = self._prices_to_signals(prices, no_cache=no_cache)
weights = self.signals_to_target_weights(signals, prices)
weights = weights * allocation
weights = self._constrain_weights(weights, prices)
positions = self.target_weights_to_positions(weights, prices)
gross_returns = self.positions_to_gross_returns(positions, prices)
commissions = self._get_commissions(positions, prices)
slippages = self._get_slippage(positions, prices)
returns = gross_returns.fillna(0) - commissions - slippages
turnover = self._positions_to_turnover(positions)
total_holdings = (positions.fillna(0) != 0).astype(int)
results_are_intraday = "Time" in signals.index.names
all_results = dict(
AbsExposure=positions.abs(),
AbsWeight=weights.abs(),
Commission=commissions,
NetExposure=positions,
Return=returns,
Signal=signals,
Slippage=slippages,
TotalHoldings=total_holdings,
Turnover=turnover,
Weight=weights)
# validate that custom backtest results are daily if results are
# daily
for custom_name, custom_df in self._backtest_results.items():
if "Time" in custom_df.index.names and not results_are_intraday:
raise MoonshotParameterError(
"custom DataFrame '{0}' won't concat properly with 'Time' in index, "
"please take a cross-section first, for example: "
"`my_dataframe.xs('15:45:00', level='Time')`".format(custom_name))
all_results.update(self._backtest_results)
if self.BENCHMARK:
all_results["Benchmark"] = self._get_benchmark(prices, daily=not results_are_intraday)
results = pd.concat(all_results, keys=list(sorted(all_results.keys())))
names = ["Field","Date"]
if results.index.nlevels == 3:
names.append("Time")
results.index.set_names(names, inplace=True)
if label_sids:
symbols = self._securities_master.Symbol
symbols_with_sids = symbols.astype(str) + "(" + symbols.index.astype(str) + ")"
results.rename(columns=symbols_with_sids.to_dict(), inplace=True)
# truncate at requested start_date
if start_date:
results = results.iloc[
results.index.get_level_values("Date") >= pd.Timestamp(start_date)]
return results
def _get_benchmark(self, prices, daily=True):
"""
Returns a 1-column DataFrame of benchmark prices, either extracted
from prices or queried from BENCHMARK_DB if defined.
BENCHMARK_DB, if used, must contain end-of-day prices.
If prices are intraday and daily=True, the returned benchmark prices
will be daily; if this is the case and benchmark prices are extracted
from the prices DataFrame, BENCHMARK_TIME will be used to extract
daily prices.
If prices are intraday and daily=False, intraday benchmark prices
will be returned; if this is the case and BENCHMARK_DB is used, the
daily benchmark prices will be broadcast across the entire intraday
timeframe.
"""
if self.BENCHMARK_DB:
try:
benchmark_prices = get_prices(
self.BENCHMARK_DB,
sids=self.BENCHMARK,
start_date=prices.index.get_level_values("Date").min(),
end_date=prices.index.get_level_values("Date").max(),
fields="Close",
# if this is a minute Zipline bundle, we want to query
# daily bars; data_frequency is ignored if this is not
# a Zipline bundle
data_frequency="daily"
)
except requests.HTTPError as e:
raise MoonshotError("error querying BENCHMARK_DB {0}: {1}".format(
self.BENCHMARK_DB, repr(e)
))
benchmark_prices = benchmark_prices.loc["Close"]
if "Time" in benchmark_prices.index.names:
raise MoonshotParameterError(
"only end-of-day databases are supported for BENCHMARK_DB but {0} is intraday".format(
self.BENCHMARK_DB))
# Reindex benchmark prices like prices
first_prices_field = prices.loc[prices.index.get_level_values("Field")[0]]
# either reindex daily to daily (end-of-day backtests)
if "Time" not in first_prices_field.index.names:
benchmark_prices = benchmark_prices.reindex(index=first_prices_field.index)
else:
# or reindex daily to intraday daily (continuous intraday backtests)
benchmark_prices = benchmark_prices.reindex(index=first_prices_field.index, level="Date")
# and possibly back to daily (once-a-day intraday backtests)
if daily:
benchmark_prices = benchmark_prices.groupby(
benchmark_prices.index.get_level_values("Date")).last()
benchmark_db = self.BENCHMARK_DB
else:
benchmark_prices = prices
benchmark_db = self.DB
field = None
fields = benchmark_prices.index.get_level_values("Field").unique()
candidate_fields = ("Close", "Open", "Bid", "Ask", "High", "Low")
for candidate in candidate_fields:
if candidate in fields:
field = candidate
break
else:
raise MoonshotParameterError("Cannot extract BENCHMARK {0} from {1} data without one of {2}".format(
self.BENCHMARK, benchmark_db, ", ".join(candidate_fields)))
benchmark_prices = benchmark_prices.loc[field]
try:
benchmark = benchmark_prices[self.BENCHMARK]
except KeyError:
raise MoonshotError("BENCHMARK Sid {0} is not in {1} data".format(
self.BENCHMARK, benchmark_db))
# to avoid inserting an extra column in the results DataFrame,
# store the benchmark prices under the first column
if self.BENCHMARK_DB:
benchmark.name = prices.columns[0]
if "Time" in benchmark_prices.index.names and daily:
if not self.BENCHMARK_TIME:
raise MoonshotParameterError(
"Cannot extract BENCHMARK {0} from {1} data because prices contains intraday "
"prices but no BENCHMARK_TIME specified".format(self.BENCHMARK, benchmark_db))
try:
benchmark = benchmark.xs(self.BENCHMARK_TIME, level="Time")
except KeyError:
raise MoonshotError("BENCHMARK_TIME {0} is not in {1} data".format(
self.BENCHMARK_TIME, benchmark_db))
return pd.DataFrame(benchmark)
def save_to_results(self, name, df):
"""
Saves the DataFrame to the backtest results output.
DataFrame should have a Date or (Date, Time) index with
Sids as columns.
Parameters
----------
name : str, required
the name to assign to the DataFrame
df : DataFrame, required
the DataFrame to save
Returns
-------
None
Examples
--------
Save moving averages of closing prices to results:
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(50).mean()
>>> self.save_to_results("MAvg", mavgs)
"""
# No-op if trading
if self.is_trade:
return
reserved_names = [
"Signal",
"Weight",
"AbsWeight",
"AbsExposure",
"NetExposure",
"Turnover",
"TotalHolding",
"Commission",
"Slippage",
"Return",
"Benchmark"
]
if name in reserved_names:
raise ValueError("name {0} is a reserved name".format(name))
index_levels = df.index.names
if "Date" not in index_levels:
raise MoonshotParameterError(
"custom DataFrame '{0}' must have index called 'Date' to concat properly, but has {1}".format(
name, ",".join([str(level_name) for level_name in index_levels])))
if not hasattr(df.index.get_level_values("Date"), "date"):
raise MoonshotParameterError("custom DataFrame '{0}' must have a DatetimeIndex to concat properly".format(name))
self._backtest_results[name] = df
def trade(self, allocations, review_date=None):
"""
Run the strategy and create orders.
Parameters
----------
allocations : dict, required
dict of account:allocation to strategy (expressed as a percentage of NLV)
review_date : str (YYYY-MM-DD [HH:MM:SS]), optional
generate orders as if it were this date, rather than using the latest date.
For end-of-day strategies, provide a date; for intraday strategies a date
and time
Returns
-------
DataFrame
orders
"""
self.is_trade = True
self.review_date = review_date
start_date = review_date or pd.Timestamp.today()
prices = self.get_prices(start_date)
prices_is_intraday = "Time" in prices.index.names
signals = self._prices_to_signals(prices)
weights = self.signals_to_target_weights(signals, prices)
weights = self._weights_to_today_weights(weights, prices)
allocations = pd.Series(allocations)
# Out:
# U12345 0.25
# U55555 0.50
# Multiply weights times allocations
weights = weights.apply(lambda x: x * allocations)
# Out:
# U12345 U55555
# 12345 -0.050 -0.10
# 23456 0.000 0.00
# 34567 0.025 0.05
contract_values = self._get_contract_values(prices)
contract_values = contract_values.fillna(method="ffill").loc[self._signal_date]
if prices_is_intraday:
if self._signal_time:
contract_values = contract_values.loc[self._signal_time]
else:
contract_values = contract_values.iloc[-1]
contract_values = allocations.apply(lambda x: contract_values).T
# Out:
# U12345 U55555
# 12345 95.68 95.68
# 23456 1500.00 1500.00
# 34567 3600.00 3600.00
currencies = self._securities_master.Currency
sec_types = self._securities_master.SecType
# For FX, exchange rate conversions should be based on the quote currency
# (extracted from the Symbol), not the currency (i.e. 100 EUR.USD = 100 EUR,
# not 100 USD)
if (sec_types == "CASH").any():
quote_currencies = self._securities_master.Symbol.astype(str).str.split(".").str[0]
currencies = currencies.where(sec_types != "CASH", quote_currencies)
account_balance_fields = self.ACCOUNT_BALANCE_FIELD or "NetLiquidation"
if not isinstance(account_balance_fields, (list, tuple)):
account_balance_fields = [account_balance_fields]
f = io.StringIO()
download_account_balances(
f,
latest=True,
accounts=list(allocations.index),
fields=account_balance_fields)
balances = pd.read_csv(f)
# Cast account numbers to strings
balances["Account"] = balances.Account.astype(str)
balances = balances.set_index("Account")
f = io.StringIO()
download_exchange_rates(
f, latest=True,
base_currencies=list(balances.Currency.unique()),
quote_currencies=list(currencies.unique()))
exchange_rates = pd.read_csv(f)
# Use the lesser field if multiple fields were given (see class docstring)
nlvs = balances[account_balance_fields].min(axis=1).reindex(allocations.index)
# Out:
# U12345 1000000
# U55555 500000
nlvs = weights.apply(lambda x: nlvs, axis=1)
# Out:
# U12345 U55555
# 12345 1000000 500000
# 23456 1000000 500000
# 34567 1000000 500000
base_currencies = balances.Currency.reindex(allocations.index)
# Out:
# U12345 USD
# U55555 EUR
base_currencies = weights.apply(lambda x: base_currencies, axis=1)
# Out:
# U12345 U55555
# 12345 USD EUR
# 23456 USD EUR
# 34567 USD EUR
trade_currencies = allocations.apply(lambda x: currencies).T
# Out:
# U12345 U55555
# 12345 USD USD
# 23456 JPY JPY
# 34567 JPY JPY
base_currencies = base_currencies.stack()
trade_currencies = trade_currencies.stack()
base_currencies.name = "BaseCurrency"
trade_currencies.name = "QuoteCurrency"
currencies = pd.concat((base_currencies, trade_currencies), axis=1)
# -*- coding: utf-8 -*-
import argparse
import itertools
import logging.config
import os
import sys
from collections import Counter
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
from pandas import DataFrame
src = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(src, "data"))
sys.path.append(os.path.join(src, "features"))
import multiprocess_utils as multi_utils
import preprocess as prep
import build_features as feat
# TODO: Integrate in the future
# AGGREGATE_COLUMNS = ['Languages', 'Locations', 'DeviceCategories',
# 'TrafficSources', 'TrafficMediums', 'NetworkLocations', 'sessionID',
# 'Times', 'Dates', 'Time_Spent', 'userID']
# TODO: Extend with more BigQuery fields. Pre-defined columns will be aggregated
COUNTABLE_AGGREGATE_COLUMNS = ['Languages', 'Locations', 'DeviceCategories', 'DeviceCategory', 'TrafficSources',
'TrafficMediums', 'NetworkLocations', 'Dates']
# TODO: Extend with more BigQuery fields. Pre-defined columns will be aggregated
SLICEABLE_COLUMNS = ['Occurrences', 'Languages', 'Locations', 'DeviceCategories', 'DeviceCategory', 'TrafficSources',
'TrafficMediums', 'NetworkLocations', 'Dates']
# Columns to drop post-internal processing if DROP_ONE_OFFS is true: these are initialized in order to set up
# "PageSequence" which is used for journey drops instead of "Sequence" which includes events, hence making journeys
# overall more infrequent.
DROPABLE_COLS = ['Page_Event_List', 'Page_List']
# Execute module for only one file
SINGLE: bool = False
# Fewer files to process than available cpus.
FEWER_THAN_CPU: bool = False
# Drop journeys occurring once (not in a day, multiple days, governed by DEPTH globals). If false, overrides depth
# globals and keeps journeys, resulting in massive dataframes (danger zone).
DROP_ONE_OFFS: bool = False
# Drop journeys of length 1
DROP_ONES: bool = False
# Keep only journeys of length 1
KEEP_ONES: bool = False
# Maximum recursive depth for distribution function
MAX_DEPTH: int = -1
# Recursive depth limit for distribution function, so one-off journeys are dropped in time.
DEPTH_LIM: int = 1
# If there are many files to be merged, load and preprocess them in batches
BATCH_SIZE: int = 3
# A bit of a magic number, but limits dataframes that can be passed off to workers. If dataframe exceeds this size,
# switch to sequential execution.
ROW_LIMIT: int = 1000000
def list_to_dict(metadata_list):
"""
Transform metadata lists to dictionary aggregates
:param metadata_list:
:return:
"""
return list(Counter(metadata_list).items())
def str_to_dict(metadata_str):
"""
Transform metadata string eg mobile,desktop,mobile to [(mobile,2),(desktop,1)] dict-like
list.
:param metadata_str:
:return: dict-like list of frequencies
"""
# print(metadata_str)
return list_to_dict(metadata_str.split(','))
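# Example: str_to_dict("mobile,desktop,mobile") returns [('mobile', 2), ('desktop', 1)]
# (Counter preserves insertion order, so first-seen values come first).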
def aggregate_dict(metadata_list):
"""
Aggregate over multiple metadata frequency lists, summing up frequencies over the course of multiple days.
:param metadata_list:
:return: dict-like list of frequencies
"""
metadata_counter = {}
for meta in metadata_list:
for key, value in meta:
if key not in metadata_counter:
metadata_counter[key] = value
else:
metadata_counter[key] += value
return list(metadata_counter.items())
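# Example: aggregate_dict([[('mobile', 2), ('desktop', 1)], [('mobile', 3)]])
# returns [('mobile', 5), ('desktop', 1)], i.e. per-day frequency lists summed up.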
def zip_aggregate_metadata(user_journey_df):
"""
TODO: needs more work; right now it is dependent on a hardcoded df column specification. Not used at the moment.
:param user_journey_df:
:return:
"""
col = []
for tup in user_journey_df.itertuples():
locs = tup.Locations.split(',')
langs = tup.Languages.split(',')
devs = tup.DeviceCategories.split(',')
zipped_meta_counter = Counter()
for loc, lang, dev in zip(locs, langs, devs):
zipped_meta_counter[(loc, lang, dev)] += 1
col.append(list(zipped_meta_counter.items()))
user_journey_df['AggMeta'] = col
def sequence_preprocess(user_journey_df):
"""
Bulk-execute main input pre-processing functions: from BigQuery journey strings to Page_Event_List to Page_List.
PageSequence required for dataframes groupbys/filtering.
:param user_journey_df: dataframe
:return: no return, columns added in place.
"""
logger.info("BQ Sequence string to Page_Event_List...")
user_journey_df['Page_Event_List'] = user_journey_df['Sequence'].map(prep.bq_journey_to_pe_list)
logger.info("Page_Event_List to Page_List...")
user_journey_df['Page_List'] = user_journey_df['Page_Event_List'].map(lambda x: prep.extract_pe_components(x, 0))
logger.info("Page_List to PageSequence...")
# TODO: Remove condition + internal PageSequence post-testing/debugging.
if 'PageSequence' not in user_journey_df.columns:
user_journey_df['PageSequence'] = user_journey_df['Page_List'].map(lambda x: ">>".join(x))
else:
user_journey_df['PageSequence_internal'] = user_journey_df['Page_List'].map(lambda x: ">>".join(x))
def event_preprocess(user_journey_df):
"""
Bulk-execute event related functions... Run after sequence_preprocess(user_journey_df) so that
Page_Event_List column exists
:param user_journey_df: dataframe
:return: no return, columns added in place.
"""
logger.info("Preprocess and aggregate events...")
logger.debug("Page_Event_List to Event_List...")
user_journey_df['Event_List'] = user_journey_df['Page_Event_List'].map(lambda x: prep.extract_pe_components(x, 1))
logger.debug("Computing event-related counts and frequencies...")
event_counters(user_journey_df)
def taxon_preprocess(user_journey_df):
"""
Bulk map functions for taxon extraction from page/event lists.
:param user_journey_df: dataframe
:return: no return, columns added in place.
"""
logger.info("Preprocess taxons...")
logger.debug("Page_Event_List to Taxon_List...")
user_journey_df['Taxon_List'] = user_journey_df['Page_Event_List'].map(lambda x: prep.extract_cd_components(x, 2))
logger.debug("Page_Event_List to Taxon_Page_List...")
user_journey_df['Taxon_Page_List'] = user_journey_df['Page_Event_List'].map(lambda x: prep.extract_pcd_list(x, 2))
def event_counters(user_journey_df):
"""
Bulk map functions for event frequency/counts.
:param user_journey_df: dataframe
:return: no return, columns added in place.
"""
logger.debug("Computing number of event categories...")
user_journey_df['num_event_cats'] = user_journey_df['Event_List'].map(feat.count_event_cat)
logger.debug("Computing frequency of event categories...")
user_journey_df['Event_cats_agg'] = user_journey_df['Event_List'].map(feat.aggregate_event_cat)
logger.debug("Computing frequency of event categories and actions...")
user_journey_df['Event_cat_act_agg'] = user_journey_df['Event_List'].map(feat.aggregate_event_cat_act)
def add_loop_columns(user_journey_df):
"""
Bulk map functions for journey loop collapsing and de-looped occurrence counts.
:param user_journey_df: dataframe
:return: no return, columns added in place.
"""
logger.info("Preprocess journey looping...")
logger.debug("Collapsing loops...")
user_journey_df['Page_List_NL'] = user_journey_df['Page_List'].map(prep.collapse_loop)
# In order to groupby during analysis step
logger.debug("De-looped lists to string...")
user_journey_df['Page_Seq_NL'] = user_journey_df['Page_List_NL'].map(lambda x: ">>".join(x))
if 'Page_Seq_Occurrences' not in user_journey_df.columns:
logger.debug("Setting up Page_Seq_Occurrences...")
user_journey_df['Page_Seq_Occurrences'] = user_journey_df.groupby('PageSequence')['Occurrences'].transform(
'sum')
# Count occurrences of de-looped journeys, most generic journey frequency metric.
logger.debug("Aggregating de-looped journey occurrences...")
user_journey_df['Occurrences_NL'] = user_journey_df.groupby('Page_Seq_NL')['Occurrences'].transform('sum')
logger.debug("De-looped page sequence to list...")
user_journey_df['Page_List_NL'] = user_journey_df['Page_Seq_NL'].map(
lambda x: x.split(">>") if isinstance(x, str) else np.NaN)
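# Example (hedged, assuming prep.collapse_loop removes consecutive repeats):
# Page_List ['/a', '/a', '/b', '/a'] -> Page_List_NL ['/a', '/b', '/a'] and
# Page_Seq_NL '/a>>/b>>/a', so refresh-style loops do not inflate journey length.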
def groupby_meta(df_slice: DataFrame, depth: int, multiple_dfs: bool):
"""
Aggregate specified metadata column. If it's the first recursive run, transform aggregate metadata string to a
dict-like list.
:param df_slice: specified metadata column (refer to AGGREGATE_COLUMNS values)
:param depth: (int) recursive call tracker, depth = 0 indicates first recursive call
:param multiple_dfs: (boolean) indicates whether many dfs have been merged and require grouping by
:return: no return, mapping and drops happen inplace on df_slice.
"""
agg = df_slice.columns[1]
print("multiple dfs:", multiple_dfs)
# One-off
if isinstance(df_slice[agg].iloc[0], str):
print("list to dict")
df_slice[agg] = df_slice[agg].map(str_to_dict)
if multiple_dfs:
metadata_gpb = df_slice.groupby('Sequence')[agg].apply(aggregate_dict)
logger.debug("Mapping {}, items: {}...".format(agg, len(metadata_gpb)))
df_slice[agg] = df_slice['Sequence'].map(metadata_gpb)
drop_duplicate_rows(df_slice)
def drop_duplicate_rows(df_slice: DataFrame):
"""
Drop duplicate rows from a dataframe slice.
:param df_slice:
:return:
"""
bef = df_slice.shape[0]
logger.debug("Current # of rows: {}. Dropping duplicate rows..".format(bef))
df_slice.drop_duplicates(subset='Sequence', keep='first', inplace=True)
after = df_slice.shape[0]
logger.debug("Dropped {} duplicated rows.".format(bef - after))
# noinspection PyUnusedLocal
def conditional_pre_gpb_drop(df_occ_slice: list, df_meta_slice: list):
"""
Drop samples from the metadata dataframe slice depending on the already reduced Occurrences slice (the occurrence
slice is used as the basis for the drop because it is the fastest to compute). Only runs if the contents of
df_occ_slice have already been reduced.
:param df_occ_slice: list of (file_code, df_occurrence_slice) tuples
:param df_meta_slice: list of (file_code, df_meta_slice) tuples
:return: reduced df_meta_slice
"""
for df_code_i, df_slice_i in df_occ_slice:
for df_code_j, df_slice_j in df_meta_slice:
if df_code_i == df_code_j:
seq_occ = df_slice_i.Sequence.values
df_slice_j.query("Sequence.isin(@seq_occ)", inplace=True)
return df_meta_slice
def process_dataframes(pool: Pool, dflist: list, chunks: int, depth: int = 0, additional: DataFrame = None):
"""
Main merge routine: recursively merge and aggregate the dataframes in dflist into a single dataframe.
:param pool: pool of worker processes (daemons)
:param dflist: list of dataframes to evaluate
:param chunks: len(partitions)
:param depth: Increases roughly every 4-5 days of data accumulation
:param additional: from batching process, output from previous run that needs to be merged with dflist contents
:return: contents of dflist merged into a single, metadata+occurrence-aggregated dataframe
"""
# or (len(dflist) == 1 and depth == 0)
if len(dflist) > 1 or (len(dflist) == 1 and depth == 0):
new_list = []
partitions = multi_utils.partition_list(dflist, chunks, FEWER_THAN_CPU)
multi_dfs = []
parts_to_merge = len(partitions)
print("number of partitions:", partitions)
print("number of dfs:", len(dflist))
for i, index_list in enumerate(partitions):
print(index_list)
lst = [dflist[ind] for ind in index_list]
multi_dfs.append(len(lst) > 1)
logger.info("Run: {} Num_of_df_to_merge: {}".format(i, len(lst)))
pair_df = pd.concat(lst)
import os
import openmatrix as omx
import pandas as pd
import geopandas as gpd
from shapely import wkt
import numpy as np
import logging
import requests
from tqdm import tqdm
import time
from pilates.utils.geog import get_block_geoms, \
map_block_to_taz, get_zone_from_points, get_taz_geoms
logger = logging.getLogger(__name__)
beam_skims_types = {'timePeriod': str,
'pathType': str,
'origin': int,
'destination': int,
'TIME_minutes': float,
'TOTIVT_IVT_minutes': float,
'VTOLL_FAR': float,
'DIST_meters': float,
'WACC_minutes': float,
'WAUX_minutes': float,
'WEGR_minutes': float,
'DTIM_minutes': float,
'DDIST_meters': float,
'KEYIVT_minutes': float,
'FERRYIVT_minutes': float,
'BOARDS': float,
'DEBUG_TEXT': str
}
def _load_raw_beam_skims(settings, remote_url=None):
if not remote_url:
skims_fname = settings.get('skims_fname', False)
path_to_beam_skims = os.path.join(
settings['beam_local_output_folder'], skims_fname)
else:
path_to_beam_skims = remote_url
try:
# load skims from disk or url
skims = pd.read_csv(path_to_beam_skims, dtype=beam_skims_types)
except KeyError:
raise KeyError(
"Couldn't find input skims at {0}".format(path_to_beam_skims))
return skims
def _create_skim_object(data_dir, overwrite=True):
skims_path = os.path.join(data_dir, 'skims.omx')
skims_exist = os.path.exists(skims_path)
if skims_exist:
if (overwrite):
logger.info("Found existing skims, removing.")
os.remove(skims_path)
else:
logger.info("Found existing skims, no need to re-create.")
return False
logger.info("Creating skims.omx from BEAM skims")
skims = omx.open_file(skims_path, 'w')
skims.close()
return True
def _create_skims_by_mode(settings):
"""
Returns OD pandas DataFrames for auto and transit, plus the number of TAZs.
"""
logger.info("Splitting BEAM skims by mode.")
skims_df = _load_raw_beam_skims(settings)
num_hours = skims_df['timePeriod'].nunique()
num_modes = skims_df['pathType'].nunique()
num_od_pairs = len(skims_df) / num_hours / num_modes
# make sure the matrix is square
num_taz = np.sqrt(num_od_pairs)
assert num_taz.is_integer()
num_taz = int(num_taz)
# convert beam skims to activitysim units (miles and minutes)
skims_df['DIST_miles'] = skims_df['DIST_meters'] * (0.621371 / 1000)
skims_df['DDIST_miles'] = skims_df['DDIST_meters'] * (0.621371 / 1000)
skims_df = skims_df.sort_values(['origin', 'destination', 'TIME_minutes'])
logger.info('Splitting out auto skims.')
auto_df = skims_df.loc[skims_df['pathType'] == 'SOV']
logger.info('Splitting out transit skims.')
transit_df = skims_df.loc[
skims_df['pathType'].isin(settings['transit_paths'])]
return auto_df, transit_df, num_taz
def _distance_skims(settings, auto_df, data_dir, num_taz):
# Open skims object
skims_path = os.path.join(data_dir, 'skims.omx')
skims = omx.open_file(skims_path, 'a')
# TO DO: Include walk and bike distances,
# for now walk and bike are the same as drive.
distances_auto = auto_df.drop_duplicates(
['origin', 'destination'],
keep='last')[settings['beam_asim_hwy_measure_map']['DIST']]
# TO DO: Do something better.
distances_auto = distances_auto.replace(
0, np.random.normal(39, 20))
# distances_walk = walk_df.drop_duplicates(
# ['origin', 'destination'])[beam_asim_hwy_measure_map['DIST']]
mx_auto = distances_auto.values.reshape((num_taz, num_taz))
# mx_walk = distances_walk.values.reshape((num_taz, num_taz))
# Distance matrices
skims['DIST'] = mx_auto
skims['DISTBIKE'] = mx_auto
skims['DISTWALK'] = mx_auto
skims.close()
def _transit_access(transit_df, access_paths, num_taz):
''' OD pair value for drive access '''
df = transit_df.loc[transit_df.pathType.isin(access_paths), :].copy()
df.drop_duplicates(['origin', 'destination'], keep='last', inplace=True)
assert df.shape[0] == num_taz * num_taz
return df
def _transit_skims(settings, transit_df, data_dir, num_taz):
""" Generate transit OMX skims"""
logger.info("Creating transit skims.")
# Open skims object
skims_path = os.path.join(data_dir, 'skims.omx')
skims = omx.open_file(skims_path, 'a')
drive_access = ['DRV_COM_WLK', 'DRV_HVY_WLK',
'DRV_LOC_WLK', 'DRV_LRF_WLK', 'DRV_EXP_WLK']
walk_access = ['WLK_COM_WLK', 'WLK_HVY_WLK', 'WLK_LOC_WLK',
'WLK_LRF_WLK', 'WLK_EXP_WLK', 'WLK_TRN_WLK']
drive_access_values = _transit_access(transit_df, drive_access, num_taz)
walk_access_values = _transit_access(transit_df, walk_access, num_taz)
for path in settings['transit_paths']:
path_ = path.replace('EXP', "LOC") # Get the values of LOC for EXP.
path_ = path_.replace('TRN', "LOC") # Get the values of LOC for TRN.
# # When BEAM skims generates all skims
# mask1 = transit_df['pathType'] == path_
# df = transit_df[mask1]
# TO DO: Drive access needs to be different for each transit mode
# TO DO: Walk access needs to be different for each transit mode
if path.startswith('DRV'):
df = drive_access_values
else:
df = walk_access_values
beam_asim_transit_measure_map = settings[
'beam_asim_transit_measure_map']
for period in settings['periods']:
# # When BEAM skims generates all skims
# mask2 = df_['timePeriod'] == period
# df_ = df[mask2]
df_ = df
for measure in beam_asim_transit_measure_map.keys():
name = '{0}_{1}__{2}'.format(path, measure, period)
if (measure == 'FAR') or (measure == 'BOARDS'):
vals = df_[beam_asim_transit_measure_map[measure]]
mx = vals.values.reshape((num_taz, num_taz), order='C')
elif beam_asim_transit_measure_map[measure]:
# activitysim estimated its models using transit skims from Cube
# which store time values as scaled integers (e.g. x100), so their
# models also divide transit skim values by 100. Since our skims
# aren't coming out of Cube, we multiply by 100 to negate the division.
# This only applies for travel times. Fare is not multiplied by 100.
vals = df_[beam_asim_transit_measure_map[measure]]
mx = vals.values.reshape((num_taz, num_taz), order='C') * 100
else:
mx = np.zeros((num_taz, num_taz))
skims[name] = mx
skims.close()
def _auto_skims(settings, auto_df, data_dir, num_taz):
logger.info("Creating drive skims.")
# Open skims object
skims_path = os.path.join(data_dir, 'skims.omx')
skims = omx.open_file(skims_path, 'a')
# Create skims
for period in settings['periods']:
mask1 = auto_df['timePeriod'] == period
df = auto_df[mask1]
beam_asim_hwy_measure_map = settings['beam_asim_hwy_measure_map']
for path in settings['hwy_paths']:
for measure in beam_asim_hwy_measure_map.keys():
name = '{0}_{1}__{2}'.format(path, measure, period)
if beam_asim_hwy_measure_map[measure]:
vals = df[beam_asim_hwy_measure_map[measure]]
mx = vals.values.reshape((num_taz, num_taz), order='C')
else:
mx = np.zeros((num_taz, num_taz))
skims[name] = mx
skims.close()
def _create_offset(auto_df, data_dir):
logger.info("Creating skims offset keys")
# Open skims object
skims_path = os.path.join(data_dir, 'skims.omx')
skims = omx.open_file(skims_path, 'a')
# Generate offset mapping
taz_equivs = auto_df.origin.sort_values().unique()
skims.create_mapping('taz', taz_equivs)
skims.close()
def create_skims_from_beam(data_dir, settings, overwrite=True):
new = _create_skim_object(data_dir, overwrite)
if new:
auto_df, transit_df, num_taz = _create_skims_by_mode(settings)
# Create skims
_distance_skims(settings, auto_df, data_dir, num_taz)
_auto_skims(settings, auto_df, data_dir, num_taz)
_transit_skims(settings, transit_df, data_dir, num_taz)
# Create offset
_create_offset(auto_df, data_dir)
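# Example (hedged sketch): typical call site, assuming `settings` is the parsed
# pilates settings dict and the path is the ActivitySim data folder:
#
# >>> create_skims_from_beam("./activitysim/data", settings, overwrite=False)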
def _get_full_time_enrollment(state_fips, year):
base_url = (
'https://educationdata.urban.org/api/v1/'
'{t}/{so}/{e}/{y}/{l}/?{f}&{s}&{r}&{cl}&{ds}&{fips}')
levels = ['undergraduate', 'graduate']
enroll_list = []
for level in levels:
level_url = base_url.format(
t='college-university', so='ipeds', e='fall-enrollment',
y=year, l=level, f='ftpt=1', s='sex=99',
r='race=99', cl='class_level=99', ds='degree_seeking=99',
fips='fips={0}'.format(state_fips))
enroll_result = requests.get(level_url)
enroll = pd.DataFrame(enroll_result.json()['results'])
enroll = enroll[['unitid', 'enrollment_fall']].rename(
columns={'enrollment_fall': level})
enroll[level] = enroll[level].clip(lower=0)
enroll.set_index('unitid', inplace=True)
enroll_list.append(enroll)
full_time = pd.concat(enroll_list, axis=1).fillna(0)
full_time['full_time'] = full_time['undergraduate'] + full_time['graduate']
s = full_time.full_time
assert s.index.name == 'unitid'
return s
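# Example: for state_fips=6 (California) and year=2015, the undergraduate URL built
# above resolves to:
# https://educationdata.urban.org/api/v1/college-university/ipeds/fall-enrollment/2015/undergraduate/?ftpt=1&sex=99&race=99&class_level=99&degree_seeking=99&fips=6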
def _get_part_time_enrollment(state_fips):
base_url = (
'https://educationdata.urban.org/api/v1/'
'{t}/{so}/{e}/{y}/{l}/?{f}&{s}&{r}&{cl}&{ds}&{fips}')
levels = ['undergraduate', 'graduate']
enroll_list = []
for level in levels:
level_url = base_url.format(
t='college-university', so='ipeds', e='fall-enrollment',
y='2015', l=level, f='ftpt=2', s='sex=99',
r='race=99', cl='class_level=99', ds='degree_seeking=99',
fips='fips={0}'.format(state_fips))
enroll_result = requests.get(level_url)
enroll = pd.DataFrame(enroll_result.json()['results'])
enroll = enroll[['unitid', 'enrollment_fall']].rename(
columns={'enrollment_fall': level})
enroll[level] = enroll[level].clip(lower=0)
enroll.set_index('unitid', inplace=True)
enroll_list.append(enroll)
part_time = pd.concat(enroll_list, axis=1)
import argparse
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image
import pandas as pd
import cv2
import json
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional as F
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# from utils import utils
class FramesDataset(Dataset):
"""Creates a dataset that can be fed into DatasetLoader
Args:
frames (list): A list of cv2-compatible numpy arrays or
a list of PIL Images
"""
def __init__(self, frames):
# Convert to list of tensors
x = [F.to_tensor(img) for img in frames]
# Define which device to use, either gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Send the frames to device
x_device = [img.to(device) for img in x]
self.x = x_device
def __getitem__(self, idx):
return self.x[idx]
def __len__(self):
return len(self.x)
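# Example (hedged sketch): wrapping decoded frames for batched inference; `frames`
# is assumed to be a list of cv2/numpy images read elsewhere.
#
# >>> dataset = FramesDataset(frames)
# >>> batcher = DataLoader(dataset, batch_size=4, shuffle=False)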
class ObjectDetector():
"""ObjectDetector class with staticmethods that can be called from outside by importing as below:
from helmet_detector.detector import ObjectDetector
The staic methods can be accessed using ObjectDetector.<name of static method>()
"""
@staticmethod
def load_custom_model(model_path=None, num_classes=None):
"""Load a model from local file system with custom parameters
Load FasterRCNN model using custom parameters
Args:
model_path (str): Path to model parameters
num_classes (int): Number of classes in the custom model
Returns:
model: Loaded model in evaluation mode for inference
"""
# load an object detection model pre-trained on COCO
model = fasterrcnn_resnet50_fpn(pretrained=True)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features,num_classes)
# load previously fine-tuned parameters
# Define which device to use, either gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
model.to(device)
else:
model.load_state_dict(torch.load(model_path, map_location=device))
# Put the model in evaluation mode
model.eval()
return model
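# Example (hedged sketch): loading a fine-tuned helmet detector with two classes
# (background + helmet); the checkpoint path is illustrative.
#
# >>> model = ObjectDetector.load_custom_model("model_weights/model.pt", num_classes=2)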
@staticmethod
def run_detection(img, loaded_model):
""" Run inference on single image
Args:
img: a dataset or batch of 3D image tensors (e.g. a FramesDataset), as consumed by the loaded model
loaded_model: trained model
Returns:
Default predictions from trained model
"""
# need to make sure we have 3d tensors of shape [C, H, W]
with torch.no_grad():
prediction = loaded_model(img)
return prediction
@staticmethod
def to_dataframe_highconf(predictions, conf_thres, frame_id):
""" Converts the default predictions into a Pandas DataFrame, only predictions with score greater than conf_thres
Args:
predictions (list): Default FasterRCNN implementation output.
This is a list of dicts with keys ['boxes','labels','scores']
frame_id : frame id
conf_thres: score greater than this will be kept as detections
Returns:
A Pandas DataFrame with columns
['frame_id','class_id','score','x1','y1','x2','y2']
"""
df_list = []
for i, p in enumerate(predictions):
boxes = p['boxes'].detach().cpu().tolist()
labels = p['labels'].detach().cpu().tolist()
scores = p['scores'].detach().cpu().tolist()
df = pd.DataFrame(boxes, columns=['x1','y1','x2','y2'])
df['class_id'] = labels
df['score'] = scores
df['frame_id'] = frame_id
df_list.append(df)
df_detect = pd.concat(df_list, axis=0)
df_detect = df_detect[['frame_id','class_id','score','x1','y1','x2','y2']]
# Keep predictions with high confidence, with score greater than conf_thres
df_detect = df_detect.loc[df_detect['score'] >= conf_thres]
return df_detect
@staticmethod
def to_dataframe(predictions):
""" Converts the default predictions into a Pandas DataFrame
Args:
predictions (list): Default FasterRCNN implementation output.
This is a list of dicts with keys ['boxes','labels','scores']
Returns:
A Pandas DataFrame with columns
['frame_id','class_id','score','x1','y1','x2','y2']
"""
df_list = []
for i, p in enumerate(predictions):
boxes = p['boxes'].detach().cpu().tolist()
labels = p['labels'].detach().cpu().tolist()
scores = p['scores'].detach().cpu().tolist()
df = pd.DataFrame(boxes, columns=['x1','y1','x2','y2'])
df['class_id'] = labels
df['score'] = scores
df['frame_id'] = i
df_list.append(df)
df_detect = pd.concat(df_list, axis=0)
df_detect = df_detect[['frame_id','class_id','score','x1','y1','x2','y2']]
return df_detect
@staticmethod
def calc_iou(boxA, boxB):
# https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA) * max(0, yB - yA)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
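# Worked example: boxA = [0, 0, 10, 10], boxB = [5, 5, 15, 15]
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, IoU = 25 / 175 ~ 0.143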
@staticmethod
def evaluate_detections_iou(gt, det, iou_threshold):
"""Evaluate and obtain FN and FP records between detection and annotations
Args:
gt (pandas.DataFrame): Ground truth/annotation boxes in a Pandas
Dataframe with columns ['frame_id','class_id','x1','y1','x2','y2']
det (pandas.DataFrame): Detected boxes in a Pandas Dataframe
with columns ['frame_id','class_id','score','x1','y1','x2','y2']
iou_threshold (float): Match a detection to a ground truth box if IoU exceeds this value
Returns:
result (pandas.DataFrame): Count of total number of objects in gt and det, and tp, fn, fp
with columns ['num_object_gt', 'num_object_det', 'tp', 'fn', 'fp']
df_fn (pandas.DataFrame): False negative records in a Pandas Dataframe
with columns ['frame_id','class_id','x1','y1','x2','y2']
df_fp (pandas.DataFrame): False positive records in a Pandas Dataframe
with columns ['frame_id','class_id', 'score', 'x1','y1','x2','y2']
"""
if (gt is not None) and (det is not None):
matched = []
for g in range(gt.shape[0]):
count = 0
for d in range(det.shape[0]):
iou = ObjectDetector.calc_iou(np.array(gt.iloc[g,2:]), np.array(det.iloc[d,3:]))
if (iou > iou_threshold):
if (count == 0):
max_conf = det.iloc[d,2]
temp = [g,d,iou, det.iloc[d,2]]
count +=1
elif (count > 0):
print("Multiple detections found, keep only with highest confidence")
if (max_conf < det.iloc[d,2]):
max_conf = det.iloc[d,2]
temp = [g,d,iou, det.iloc[d,2]]
count +=1
if (count != 0):
matched.append(temp)
df_tp = pd.DataFrame(matched, columns = ['gt_index', 'det_index', 'iou', 'det_conf'])
# To qualitatively find detection error, output fn and fp boxes. just visualize them on the frame
# Get unmatched gt - these are FNs
df_fn = []
num_fn = 0
for i in range(gt.shape[0]):
if i not in df_tp['gt_index'].tolist():
df_fn.append(gt.iloc[i,:])
num_fn +=1
if num_fn > 0:
df_fn = pd.DataFrame(data=df_fn)
df_fn.columns = ['frame_id','class_id','x1','y1','x2','y2']
else:
df_fn = None
# Get unmatched det - these are FPs
df_fp = []
num_fp = 0
for i in range(det.shape[0]):
if i not in df_tp['det_index'].tolist():
df_fp.append(det.iloc[i,:])
num_fp +=1
if num_fp > 0:
df_fp = pd.DataFrame(data=df_fp)
df_fp.columns = ['frame_id','class_id', 'score', 'x1','y1','x2','y2']
else:
# print("num_fp = 0 in frame_id {}".format(gt.iloc[0,0]))
df_fp = None
# To quantify detection error, output number of helmets in gt, number of helmets in det, tp, fn, fp
frame_id = gt.iloc[0,0]
tp = len(df_tp['gt_index'].unique())
result = []
result.append([frame_id,
gt.shape[0],
det.shape[0],
tp,
num_fn,
num_fp])
result = pd.DataFrame(data=result, columns = ['frame_id', 'num_object_gt', 'num_object_det', 'tp', 'fn', 'fp'])
else:
result = None
df_fn = None
df_fp = None
return result, df_fn, df_fp
@staticmethod
def find_frames_high_fn_fp(eval_det, fn_thres, fp_thres):
""" Find frames with high fn and fp, fn >= fn_thres and fp >= fp_thres
Arg:
eval_det: Detection evaluation matrix for whole play
fn_thres: Get a list of frames where fn is greater than equal to this value
fp_thres: Get a list of frames where fn is greater than equal to this value
Return:
frame_list: List of frames with high fn and fp
"""
frame_list = eval_det[(eval_det['fn'] >= fn_thres) & (eval_det['fp'] >= fp_thres)]['frame_id'].tolist()
return frame_list
@staticmethod
def run_detection_video(video_in, model_path, full_video=True, subset_video=60, conf_thres=0.9):
""" Run detection on video
Args:
video_in: Input video path
model_path: Location of the pretrained model.pt
full_video: Bool to indicate whether to run the whole video, default = True
subset_video: Number of frames to run detection on
conf_thres = Only consider detections with score higher than conf_thres, default = 0.9
Returns:
Predicted detection for all the frames in a video
df_predictions (pandas.DataFrame): prediction of detected object for all frames
with columns ['frame_id', 'class_id', 'score', 'x1', 'y1', 'x2', 'y2']
"""
# Capture the input video
vid = cv2.VideoCapture(video_in)
# Get video title
vid_title = os.path.splitext(os.path.basename(video_in))[0]
# Get total number of frames
num_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
# load model
num_classes = 2
model = ObjectDetector.load_custom_model(model_path=model_path, num_classes=num_classes)
# if running for the whole video, then change the size of subset_video with total number of frames
if full_video:
subset_video = int(num_frames)
df_predictions = [] # predictions for whole video
for i in range(subset_video): #383
ret, frame = vid.read()
print("Processing frame#: {} running detection for videos".format(i))
# Get detection for this frame
list_frame = [frame]
dataset_frame = FramesDataset(list_frame)
prediction = ObjectDetector.run_detection(dataset_frame, model)
df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, i)
df_predictions.append(df_prediction)
# Concatenate predictions for all frames of the video
df_predictions = pd.concat(df_predictions)
return df_predictions
@staticmethod
def run_detection_frames(frames, model_path, batch_size=4, conf_thres=0.9, start_frame=0, end_frame=-1):
""" Run detection on list of frames
Args:
frames: List of frames between start_frame and end_frame of a full play video
model_path: Location of the pretrained model.pt
batch_size (int): Size of inference minibatch --> not sure we need this
conf_thres: Only consider detections with score higher than conf_thres, default = 0.9
start_frame: First frame number to output. Default is 0.
end_frame: Last frame number to output. If less than 1 then take all frames
Returns:
Predicted detection for all the frames between start_frame and end_frame of a full play video
df_predictions (pandas.DataFrame): prediction of detected object for all frames
with columns ['frame_id', 'class_id', 'score', 'x1', 'y1', 'x2', 'y2']
Todo:
Figure out how to reduce confusion around the start_frame/end_frame variable collision with utils.frames_from_video()
"""
if end_frame>=1:
assert start_frame<=end_frame
if end_frame < 0:
end_frame = start_frame + len(frames) -1
# load model
num_classes = 2
model = ObjectDetector.load_custom_model(model_path=model_path, num_classes=num_classes)
df_predictions = [] # predictions for all frames
count = 0
for i in range(start_frame, end_frame):
# Get detection for this frame
dataset_frame = FramesDataset([frames[count]])
prediction = ObjectDetector.run_detection(dataset_frame, model)
df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, i)
df_predictions.append(df_prediction)
count+=1
# dataset = FramesDataset(frames)
# batcher = DataLoader(dataset, batch_size=batch_size, shuffle=False)
# for batch in batcher:
# prediction = ObjectDetector.run_detection(batch, model)
# df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, batch)
# df_predictions.append(df_prediction)
# Concatenate predictions for all frames of the video
df_predictions = pd.concat(df_predictions)
return df_predictions
@staticmethod
def get_gt_frame(frame_id, cur_boxes):
"""Get ground truth annotations on the frames
Args:
frame_id: Frame id
cur_boxes: Current annotation boxes "left", "width", "top", "height"
Returns:
box_ret: ground truth boxes in a Pandas
Dataframe with columns ['frame_id','class_id','x1','y1','x2','y2']
"""
box_out = []
for box in cur_boxes:
box_out.append([frame_id, 1, box[0],box[2],box[0]+box[1], box[2]+box[3]])
# Return gt dataframe
box_ret = pd.DataFrame(data = box_out, columns = ['frame_id','class_id','x1','y1','x2','y2'])
return box_ret
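# Example: a ground-truth box with left=10, width=5, top=20, height=8 in frame 3
# becomes the row [3, 1, 10, 20, 15, 28], i.e. (x1, y1, x2, y2) corner coordinates.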
@staticmethod
def run_detection_eval_video(video_in, gtfile_name, model_path, full_video=True, subset_video=60, conf_thres=0.9, iou_threshold = 0.5):
""" Run detection on video
Args:
video_in: Input video path
gtfile_name: Ground Truth annotation json file name
model_path: Location of the pretrained model.pt
full_video: Bool to indicate whether to run the whole video, default = True
subset_video: Number of frames to run detection on
conf_thres = Only consider detections with score higher than conf_thres, default = 0.9
iou_threshold = Match detection with ground trurh if iou is higher than iou_threshold, default = 0.5
Returns:
Predicted detection for all the frames in a video, evaluation for detection, a dataframe with bounding boxes for
false negatives and false positives
df_predictions (pandas.DataFrame): prediction of detected object for all frames
with columns ['frame_id', 'class_id', 'score', 'x1', 'y1', 'x2', 'y2']
eval_results (pandas.DataFrame): Count of total number of objects in gt and det, and tp, fn, fp for all frames
with columns ['frame_id', 'num_object_gt', 'num_object_det', 'tp', 'fn', 'fp']
fns (pandas.DataFrame): False negative records in a Pandas Dataframe for all frames
with columns ['frame_id','class_id','x1','y1','x2','y2'], return empty dataframe if no false negatives
fps (pandas.DataFrame): False positive records in a Pandas Dataframe for all frames
with columns ['frame_id','class_id', 'score', 'x1','y1','x2','y2'], return empty dataframe if no false positives
"""
# Capture the input video
vid = cv2.VideoCapture(video_in)
# Get video title
vid_title = os.path.splitext(os.path.basename(video_in))[0]
# Get total number of frames
num_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
print("********** Num of frames", num_frames)
# load model
num_classes = 2
model = ObjectDetector.load_custom_model(model_path=model_path, num_classes=num_classes)
print("Pretrained model loaded")
# Get GT annotations
gt_labels = pd.read_csv('/home/ec2-user/SageMaker/0Artifact/helmet_detection/input/train_labels.csv')#.fillna(0)
video = os.path.basename(video_in)
print("Processing video: ",video)
labels = gt_labels[gt_labels['video']==video]
# if running for the whole video, then change the size of subset_video with total number of frames
if full_video:
subset_video = int(num_frames)
# frames = []
df_predictions = [] # predictions for whole video
eval_results = [] # detection evaluations for the whole video
fns = [] # false negative detections for the whole video
fps = [] # false positive detections for the whole video
for i in range(subset_video):
ret, frame = vid.read()
print("Processing frame#: {} running detection and evaluation for videos".format(i+1))
# Get detection for this frame
list_frame = [frame]
dataset_frame = FramesDataset(list_frame)
prediction = ObjectDetector.run_detection(dataset_frame, model)
df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, i)
df_predictions.append(df_prediction)
# Get label for this frame
cur_label = labels[labels['frame']==i+1] # get this frame's record
cur_boxes = cur_label[['left','width','top','height']].values
gt = ObjectDetector.get_gt_frame(i+1, cur_boxes)
# Evaluate detection for this frame
eval_result, fn, fp = ObjectDetector.evaluate_detections_iou(gt, df_prediction, iou_threshold)
eval_results.append(eval_result)
if fn is not None:
fns.append(fn)
if fp is not None:
fps.append(fp)
# Concatenate predictions, evaluation resutls, fns and fps for all frames of the video
df_predictions = pd.concat(df_predictions)
eval_results = pd.concat(eval_results)
# Concatenate fns if not empty, otherwise create an empty dataframe
if not fns:
fns = pd.DataFrame()
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
| tm.assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
# install pattern
# install gensim
# install nltk
# install pyspellchecker
import re
import pandas as pd
import numpy as np
import gensim
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import wordnet
from spellchecker import SpellChecker
class Cleaning:
def __init__(self):
self.WORDS = {}
return
# remove urls (starts with https, http)
def remove_URL(self, col):
text = col.tolist()
TEXT=[]
for word in text:
if pd.isnull(word):
TEXT.append(word)
else:
TEXT.append(re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', str(word)))
se = pd.Series(TEXT)
return(se)
def count_mark(self, col):
df = pd.DataFrame()
rdf = pd.DataFrame()
# remove the special characters (numbers, exclamations and question marks) from the text
# store them in a dataframe for later use
text = col.tolist()
for row in text:
if pd.isnull(row):
ser = pd.Series([np.nan,np.nan,np.nan,np.nan], index=['Number', 'Exclamation_count', 'Question_Mark_count', 'Comments_OE'])
df = df.append(ser, ignore_index=True)
else:
numVals = []
excCount = []
quesCount = []
num = re.findall(r'\b\d+\b', row)
numVals.append(num)
# remove the number from the text
for n in num:
row = row.replace(n, '')
excCount.append(row.count('!'))
row = row.replace('!', '')
quesCount.append(row.count('?'))
row = row.replace('?', '')
numSeries = pd.Series(numVals)
rdf['Number'] = numSeries.values
excSeries = pd.Series(excCount)
rdf['Exclamation_count'] = excSeries
quesSeries = pd.Series(quesCount)
rdf['Question_Mark_count'] = quesSeries
txtSeries = pd.Series(row)
rdf['Comments_OE'] = txtSeries
df = df.append(rdf, ignore_index=True)
rdf = | pd.DataFrame() | pandas.DataFrame |
# coding:utf-8
import os
import base64
import configparser
import json
import urllib
import pandas as pd
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from QUANTAXIS.QAMarket.QABroker import QA_Broker
from QUANTAXIS.QAUtil.QASetting import setting_path
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION, ORDER_MODEL
CONFIGFILE_PATH = '{}{}{}'.format(setting_path, os.sep, 'config.ini')
DEFAULT_SHIPANE_URL = 'http://1172.16.31.10:8888'
def get_config_SPE():
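# read the ShiPanE endpoint from config.ini; create the section/option with the default URL if the file or entry is missing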
config = configparser.ConfigParser()
if os.path.exists(CONFIGFILE_PATH):
config.read(CONFIGFILE_PATH)
try:
return config.get('SPE', 'uri')
except configparser.NoSectionError:
config.add_section('SPE')
config.set('SPE', 'uri', DEFAULT_SHIPANE_URL)
return DEFAULT_SHIPANE_URL
except configparser.NoOptionError:
config.set('SPE', 'uri', DEFAULT_SHIPANE_URL)
return DEFAULT_SHIPANE_URL
finally:
with open(CONFIGFILE_PATH, 'w') as f:
config.write(f)
else:
f = open(CONFIGFILE_PATH, 'w')
config.add_section('SPE')
config.set('SPE', 'uri', DEFAULT_SHIPANE_URL)
config.write(f)
f.close()
return DEFAULT_SHIPANE_URL
class QA_SPEBroker(QA_Broker):
def __init__(self, endpoint=get_config_SPE()):
self._endpoint = endpoint
self._session = requests
self.fillorder_headers = ['name', 'datetime', 'towards', 'price',
'amount', 'money', 'trade_id', 'order_id', 'code', 'shareholder', 'other']
self.holding_headers = ['code', 'name', 'hoding_price', 'price', 'pnl', 'amount',
'sell_available', 'pnl_money', 'holdings', 'total_amount', 'lastest_amounts', 'shareholder']
self.askorder_headers = ['code', 'towards', 'price', 'amount', 'transaction_price',
'transaction_amount', 'status', 'order_time', 'order_id', 'id', 'code', 'shareholders']
def call(self, func, params=''):
try:
response = self._session.get(
'{}/api/v1.0/{}'.format(self._endpoint, func), params)
text = response.text
return json.loads(text)
except:
print("ERROR")
return None
def call_post(self, func, params={}):
uri = '{}/api/v1.0/{}?client={}'.format(
self._endpoint, func, params.pop('client'))
response = self._session.post(uri, json=params)
text = response.text
return json.loads(text)
def call_delete(self, func, params=''):
uri = '{}/api/v1.0/{}?client={}'.format(
self._endpoint, func, params.pop('client'))
response = self._session.delete(uri)
text = response.text
print(text)
try:
return json.loads(text)
except:
return text
def data_to_df(self, result):
return | pd.DataFrame(data=result) | pandas.DataFrame |
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from message_ix import Scenario, macro
from message_ix.models import MACRO
from message_ix.testing import SCENARIO, make_westeros
# tons of deprecation warnings come from reading excel (xlrd library), ignore
# them for now
pytestmark = pytest.mark.filterwarnings("ignore")
W_DATA_PATH = Path(__file__).parent / 'data' / 'westeros_macro_input.xlsx'
MR_DATA_PATH = Path(__file__).parent / 'data' / 'multiregion_macro_input.xlsx'
class MockScenario:
def __init__(self):
self.data = pd.read_excel(MR_DATA_PATH, sheet_name=None)
for name, df in self.data.items():
if 'year' in df:
df = df[df.year >= 2030]
self.data[name] = df
def has_solution(self):
return True
def var(self, name, **kwargs):
df = self.data['aeei']
# add extra commodity to be removed
extra_commod = df[df.sector == 'i_therm']
extra_commod['sector'] = self.data['config']['ignore_sectors'][0]
# add extra region to be removed
extra_region = df[df.node == 'R11_AFR']
extra_region['node'] = self.data['config']['ignore_nodes'][0]
df = pd.concat([df, extra_commod, extra_region])
if name == 'DEMAND':
df = df.rename(columns={'sector': 'commodity'})
elif name in ['COST_NODAL_NET', 'PRICE_COMMODITY']:
df = df.rename(columns={
'sector': 'commodity',
'value': 'lvl'
})
df['lvl'] = 1e3
return df
@pytest.fixture(scope='class')
def westeros_solved(test_mp):
yield make_westeros(test_mp, solve=True)
@pytest.fixture(scope='class')
def westeros_not_solved(westeros_solved):
yield westeros_solved.clone(keep_solution=False)
def test_calc_valid_data_file(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
def test_calc_valid_data_dict(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None)
c = macro.Calculate(s, data)
c.read_data()
def test_calc_no_solution(westeros_not_solved):
s = westeros_not_solved
pytest.raises(RuntimeError, macro.Calculate, s, W_DATA_PATH)
def test_config(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.nodes = set(list(c.nodes) + ['foo'])
c.sectors = set(list(c.sectors) + ['bar'])
assert c.nodes == set(['Westeros', 'foo'])
assert c.sectors == set(['light', 'bar'])
c.read_data()
assert c.nodes == set(['Westeros'])
assert c.sectors == set(['light'])
def test_calc_data_missing_par(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None)
data.pop('gdp_calibrate')
c = macro.Calculate(s, data)
pytest.raises(ValueError, c.read_data)
def test_calc_data_missing_column(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None)
# skip first data point
data['gdp_calibrate'] = data['gdp_calibrate'].drop('year', axis=1)
c = macro.Calculate(s, data)
pytest.raises(ValueError, c.read_data)
def test_calc_data_missing_datapoint(westeros_solved):
s = westeros_solved
data = pd.read_excel(W_DATA_PATH, sheet_name=None)
# skip first data point
data['gdp_calibrate'] = data['gdp_calibrate'][1:]
c = macro.Calculate(s, data)
pytest.raises(ValueError, c.read_data)
#
# Regression tests: these tests were compiled upon moving from R to Python,
# values were confirmed correct at the time and thus are tested explicitly here
#
def test_calc_growth(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._growth()
assert len(obs) == 4
obs = obs.values
exp = np.array([0.0265836, 0.041380, 0.041380, 0.029186])
assert np.isclose(obs, exp).all()
def test_calc_rho(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._rho()
assert len(obs) == 1
obs = obs[0]
exp = -4
assert obs == exp
def test_calc_gdp0(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._gdp0()
assert len(obs) == 1
obs = obs[0]
exp = 500
assert obs == exp
def test_calc_k0(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._k0()
assert len(obs) == 1
obs = obs[0]
exp = 1500
assert obs == exp
def test_calc_total_cost(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._total_cost()
# 4 values, 3 in model period, one in history
assert len(obs) == 4
obs = obs.values
exp = np.array([15, 17.477751, 22.143633, 28.189798]) / 1e3
assert np.isclose(obs, exp).all()
def test_calc_price(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._price()
# 4 values, 3 in model period, one in history
assert len(obs) == 4
obs = obs.values
exp = np.array([195, 183.094376, 161.645111, 161.645111])
assert np.isclose(obs, exp).all()
def test_calc_demand(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._demand()
# 4 values, 3 in model period, one in history
assert len(obs) == 4
obs = obs.values
exp = np.array([90, 100, 150, 190])
assert np.isclose(obs, exp).all()
def test_calc_bconst(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._bconst()
assert len(obs) == 1
obs = obs[0]
exp = 3.6846576e-05
assert np.isclose(obs, exp)
def test_calc_aconst(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
obs = c._aconst()
assert len(obs) == 1
obs = obs[0]
exp = 26.027323
assert np.isclose(obs, exp)
def test_init(message_test_mp):
scen = Scenario(message_test_mp, **SCENARIO['dantzig'])
scen = scen.clone('foo', 'bar')
scen.check_out()
MACRO.initialize(scen)
scen.commit('foo')
scen.solve()
assert np.isclose(scen.var('OBJ')['lvl'], 153.675)
assert 'mapping_macro_sector' in scen.set_list()
assert 'aeei' in scen.par_list()
assert 'DEMAND' in scen.var_list()
assert 'COST_ACCOUNTING_NODAL' in scen.equ_list()
def test_add_model_data(westeros_solved):
base = westeros_solved
clone = base.clone('foo', 'bar', keep_solution=False)
clone.check_out()
MACRO.initialize(clone)
macro.add_model_data(base, clone, W_DATA_PATH)
clone.commit('finished adding macro')
clone.solve()
obs = clone.var('OBJ')['lvl']
exp = base.var('OBJ')['lvl']
assert np.isclose(obs, exp)
def test_calibrate(westeros_solved):
base = westeros_solved
clone = base.clone(base.model, 'test macro calibration',
keep_solution=False)
clone.check_out()
MACRO.initialize(clone)
macro.add_model_data(base, clone, W_DATA_PATH)
clone.commit('finished adding macro')
start_aeei = clone.par('aeei')['value']
start_grow = clone.par('grow')['value']
macro.calibrate(clone, check_convergence=True)
end_aeei = clone.par('aeei')['value']
end_grow = clone.par('grow')['value']
# calibration should have changed some/all of these values and none should
# be NaNs
assert not np.allclose(start_aeei, end_aeei, rtol=1e-2)
assert not np.allclose(start_grow, end_grow, rtol=1e-2)
assert not end_aeei.isnull().any()
assert not end_grow.isnull().any()
def test_calibrate_roundtrip(westeros_solved):
# this is a regression test with values observed on Aug 9, 2019
with_macro = westeros_solved.add_macro(
W_DATA_PATH, check_convergence=True)
aeei = with_macro.par('aeei')['value'].values
assert len(aeei) == 4
exp = [0.02, 0.07173523, 0.03741514, 0.01990172]
assert np.allclose(aeei, exp)
grow = with_macro.par('grow')['value'].values
assert len(grow) == 4
exp = [0.02658363, 0.06910296, 0.07952086, 0.02452946]
assert np.allclose(grow, exp)
#
# These are a series of tests to guarantee multiregion/multisector
# behavior is as expected.
#
def test_multiregion_valid_data():
s = MockScenario()
c = macro.Calculate(s, MR_DATA_PATH)
c.read_data()
def test_multiregion_derive_data():
s = MockScenario()
c = macro.Calculate(s, MR_DATA_PATH)
c.read_data()
c.derive_data()
nodes = ['R11_AFR', 'R11_CPA']
sectors = ['i_therm', 'rc_spec']
# make sure no extraneous data is there
check = c.data['demand'].reset_index()
assert (check['node'].unique() == nodes).all()
assert (check['sector'].unique() == sectors).all()
obs = c.data['aconst']
exp = pd.Series([3.74767687, 0.00285472], name='value',
index=pd.Index(nodes, name='node'))
pd.testing.assert_series_equal(obs, exp)
obs = c.data['bconst']
idx = pd.MultiIndex.from_product([nodes, sectors],
names=['node', 'sector'])
exp = pd.Series([1.071971e-08, 1.487598e-11, 9.637483e-09, 6.955715e-13],
name='value', index=idx)
| pd.testing.assert_series_equal(obs, exp) | pandas.testing.assert_series_equal |
#%%
#### Processes the raw data json using pandas to get
#### dataframes that can be exported directly to Postgres as normalized tables
import sys
import inspect
import os
import json
import pandas as pd
class DataProcessing:
def __init__(self):
self.product_data_path = self.data_path = '../data/product_data'
self.recipes_data_path = '../data/recipes_data'
self.raw_product_data = DataProcessing._read_data(self.product_data_path)
self.raw_recipes_data = DataProcessing._read_data(self.recipes_data_path)
self.dictionary_of_category_dataframes = {}
self._product_data_to_dataframes() # populate dictionary_of_category_dataframes
self.all_products_df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import pandas as pd
from featuretools.primitives import IsNull, Max
from featuretools.primitives.base import PrimitiveBase, make_agg_primitive
from featuretools.variable_types import DatetimeTimeIndex, Numeric
def test_call_agg():
primitive = Max()
# the assert is run twice on purpose
assert 5 == primitive(range(6))
assert 5 == primitive(range(6))
def test_call_trans():
primitive = IsNull()
assert pd.Series([False for i in range(6)]).equals(primitive(range(6)))
assert pd.Series([False for i in range(6)]).equals(primitive(range(6)))
def test_uses_calc_time():
def time_since_last(values, time=None):
time_since = time - values.iloc[0]
return time_since.total_seconds()
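# uses_calc_time=True makes featuretools pass the feature-calculation cutoff time to the function as the `time` keyword argument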
TimeSinceLast = make_agg_primitive(time_since_last,
[DatetimeTimeIndex],
Numeric,
name="time_since_last",
uses_calc_time=True)
primitive = TimeSinceLast()
datetimes = pd.Series([datetime(2015, 6, 7), datetime(2015, 6, 6)])
answer = 86400.0
assert answer == primitive(datetimes, time=datetime(2015, 6, 8))
def test_call_multiple_args():
class TestPrimitive(PrimitiveBase):
def get_function(self):
def test(x, y):
return y
return test
primitive = TestPrimitive()
assert pd.Series([0, 1]).equals(primitive(range(1), range(2)))
assert | pd.Series([0, 1]) | pandas.Series |
import argparse
import json
import math
import random
import string
import pandas as pd
from faker import Faker
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to json config file', dest='config_file_path', required=True)
parser.add_argument('--output', help='output xslx file', dest='output_file_path', required=True)
args = parser.parse_args()
with open(args.config_file_path) as config_file:
config = json.load(config_file)
num_of_initial_rows = int(config['total_row_cnt']) - int(config['total_row_cnt'] * config['duplication_rate'])
num_duplicated_rows = int(config['total_row_cnt']) - num_of_initial_rows
fake_gen = Faker(config['localization'])
initial_fake_data = | pd.DataFrame() | pandas.DataFrame |
"""
Coding: utf-8
Author: Jesse
Code Goal:
完成一级指标:政策主体的指标构建,使用颁布主题清单。
Code Logic:
分成两个部分:颁布主体行政级别以及是否联合发布
颁布主体行政级别部分
"""
import pandas as pd
import numpy as np
import xlwings as xw
from PolicyAnalysis import cptj as cj
"""
————————————————————
以下是使用 re 检索+ DFC 映射的数据处理写法
————————————————————
"""
class supervisors_re:
def __init__(self, Data, userdict, indifile, opsheet):
# 添加关键词词典
self.userdict = userdict
self.indifile = indifile
self.opsheet = opsheet
self.Data = Data
self.DTM = None
self.DFC = None
self.cls_map = None
self.sr_map = None
self.pt_map = None
self.export = None
# DataFrame used to decide whether a policy was issued jointly or individually
self.middle = pd.DataFrame()
self.score_map()
self.class_map()
self.supervisors()
def score_map(self):
# load the indicator workbook
app = xw.App(visible=False, add_book=False)
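# run Excel in the background (headless) via xlwings while the workbook is read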
app.screen_updating = False
app.display_alerts = False
try:
wb = app.books.open(self.indifile)
sht = wb.sheets[self.opsheet]
df_indi = sht.used_range.value
df_indi = pd.DataFrame(df_indi)
df_indi.drop(0, axis=0, inplace=True)
df_indi.reset_index(drop=True, inplace=True)
sr_indi = df_indi[0]
sr_score = df_indi[1]
sr_map = dict([(k, v) for k, v in zip(sr_indi, sr_score)])
finally:
app.quit()
self.sr_map = sr_map
def class_map(self):
# load the indicator workbook
app = xw.App(visible=False, add_book=False)
app.screen_updating = False
app.display_alerts = False
try:
wb = app.books.open(self.indifile)
sht = wb.sheets[self.opsheet]
df_indi = sht.used_range.value
df_indi = pd.DataFrame(df_indi)
df_indi.drop(0, axis=0, inplace=True)
df_indi.reset_index(drop=True, inplace=True)
cls_indi = df_indi[0]
cls_id = df_indi[2]
cls_map = dict([(k, v) for k, v in zip(cls_indi, cls_id)])
finally:
app.quit()
self.cls_map = cls_map
def supervisors(self):
"""
:param userdict_link: 关键词清单链接
:param Data: 输入的样本框, {axis: 1, 0: id, 1: 标题, 2: 正文, 3: 来源, 4:: freq}
:return: 返回一个Series, {index=df['id'], values=level of supervisors}
supervisors 会对输入的样本进行切词 + 词频统计处理,计算 发文主体+联合发布 的分数
"""
lst = cj.txt_to_list(self.userdict)
print('开始检索标题……')
data = self.Data.copy() # copy to avoid modifying the original frame outside this sample
# next, search within the titles
data['正文'] = data['标题']
result_title = cj.words_docs_freq(lst, data)
point_title = cj.dfc_point_giver(result_title['DFC'], self.sr_map)
class_title = cj.dfc_sort_filter(result_title['DFC'], self.cls_map)
# next, search within the sources
print('开始检索来源……')
data['正文'] = data['来源']
result_source = cj.words_docs_freq(lst, data)
self.DFC = result_source['DFC']
self.DTM = result_source['DTM']
point_source = cj.dfc_point_giver(self.DFC, self.sr_map)
class_source = cj.dfc_sort_filter(self.DTM, self.cls_map)
two_point = pd.concat([point_title, point_source], axis=1)
two_class = pd.concat([class_title, class_source], axis=1)
final_point = pd.DataFrame(two_point.agg(np.max, axis=1), columns=['颁布主体得分'])
final_class = pd.DataFrame(two_class.agg(np.max, axis=1), columns=['是否联合发布'])
final_class = final_class.applymap(lambda x: 1 if x > 1 else 0)
final_point.fillna(0, inplace=True)
final_class.fillna(0, inplace=True)
export_data = pd.concat([final_class, final_point], axis=1)
# ff_export_data.to_excel('Export_data_1_颁布主体+是否联合发布.xlsx')
self.export = export_data
"""
————————————————————————
以下是使用 jieba 分词后检索+ DTM 映射的数据处理写法
————————————————————————
"""
class supervisor_jieba:
def __init__(self, Data, userdict, indifile, opsheet, stopwords):
self.userdict = userdict
self.indifile = indifile
self.opsheet = opsheet
self.stopwords = stopwords
self.Data = Data
self.DTM = None
self.DFC = None
self.cls_map = None
self.sr_map = None
self.pt_map = None
self.export = None
# DataFrame used to decide whether a policy was issued jointly or individually
self.middle = pd.DataFrame()
self.point_map()
self.class_map()
self.sort_map()
self.supervisors()
def class_map(self):
# load the indicator workbook
app = xw.App(visible=False, add_book=False)
app.screen_updating = False
app.display_alerts = False
try:
wb = app.books.open(self.indifile)
sht = wb.sheets[self.opsheet]
df_indi = sht.used_range.value
df_indi = | pd.DataFrame(df_indi) | pandas.DataFrame |
"""
Created on Mon May 30 2020
@author: evadatinez
"""
from pathlib import Path
import pandas as pd
def complaintsData(fname, data):
"""This function updates a dataframe with the CSV data
with complaints
Params:
fname: path to FILE
data: pandas dataframe to store data
"""
path = Path(fname + '/Participant8Observations.csv')
# read csv from path
df = | pd.read_csv(path) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # 02__trans_motifs
#
# in this notebook, i find motifs that are associated w/ trans effects using linear models and our RNA-seq data
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import itertools
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import sys
from itertools import combinations
from scipy.stats import boxcox
from scipy.stats import linregress
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from statsmodels.stats.anova import anova_lm
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
# import utils
sys.path.append("../../../utils")
from plotting_utils import *
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
mpl.rcParams['figure.autolayout'] = False
# In[2]:
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
# In[3]:
np.random.seed(2019)
# In[4]:
QUANT_ALPHA = 0.05
# ## functions
# In[5]:
def calculate_gc(row, col):
cs = row[col].count("C")
gs = row[col].count("G")
gc = (cs+gs)/len(row[col])
return gc
# In[6]:
def calculate_cpg(row, col):
cpgs = row[col].count("CG")
cpg = cpgs/len(row[col])
return cpg
# In[7]:
def sig_status(row):
if row.padj_trans < 0.05:
return "sig"
else:
return "not sig"
# In[8]:
def neg_odds(row):
if row["sig_status"] == "sig hESC":
return -row["hESC_odds"]
elif row["sig_status"] == "sig mESC":
return row["mESC_odds"]
else:
return np.nan
# In[9]:
def direction_match(row):
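# a motif "matches" when its trans effect (beta_trans) and expression change (logFC) have the same sign for activators, or opposite signs for repressors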
if row.activ_or_repr == "activating":
if row.beta_trans < 0 and row.logFC < 0:
return "match"
elif row.beta_trans > 0 and row.logFC > 0:
return "match"
else:
return "no match"
elif row.activ_or_repr == "repressing":
if row.beta_trans < 0 and row.logFC > 0:
return "match"
elif row.beta_trans > 0 and row.logFC < 0:
return "match"
else:
return "no match"
else:
return "unclear"
# ## variables
# In[10]:
human_motifs_f = "../../../data/04__mapped_motifs/human_motifs_filtered.txt.gz"
mouse_motifs_f = "../../../data/04__mapped_motifs/mouse_motifs_filtered.txt.gz"
# In[11]:
motif_info_dir = "../../../misc/01__motif_info"
motif_map_f = "%s/00__lambert_et_al_files/00__metadata/curated_motif_map.txt" % motif_info_dir
motif_info_f = "%s/00__lambert_et_al_files/00__metadata/motif_info.txt" % motif_info_dir
# In[12]:
sig_motifs_f = "../../../data/04__mapped_motifs/sig_motifs.txt"
# In[13]:
tss_map_f = "../../../data/01__design/01__mpra_list/mpra_tss.with_ids.RECLASSIFIED_WITH_MAX.txt"
# In[14]:
index_f = "../../../data/01__design/02__index/TWIST_pool4_v8_final.with_element_id.txt.gz"
# In[15]:
data_f = "../../../data/02__mpra/03__results/all_processed_results.txt"
# In[16]:
expr_dir = "../../../data/03__rna_seq/04__TF_expr"
orth_expr_f = "%s/orth_TF_expression.txt" % expr_dir
human_expr_f = "%s/hESC_TF_expression.txt" % expr_dir
mouse_expr_f = "%s/mESC_TF_expression.txt" % expr_dir
# In[17]:
orth_f = "../../../misc/00__ensembl_orthologs/ensembl96_human_mouse_orths.txt.gz"
# ## 1. import data
# In[18]:
index = pd.read_table(index_f, sep="\t")
index_elem = index[["element", "tile_type", "element_id", "name", "tile_number", "chrom", "strand", "actual_start",
"actual_end", "dupe_info"]]
index_elem = index_elem.drop_duplicates()
# In[19]:
tss_map = pd.read_table(tss_map_f, sep="\t")
tss_map.head()
# In[20]:
# this file is already filtered to correct tile nums
human_motifs = pd.read_table(human_motifs_f, sep="\t")
human_motifs.head()
# In[21]:
# this file is already filtered to correct tile nums
mouse_motifs = pd.read_table(mouse_motifs_f, sep="\t")
mouse_motifs.head()
# In[22]:
motif_info = pd.read_table(motif_info_f, sep="\t")
motif_info.head()
# In[23]:
sig_motifs = pd.read_table(sig_motifs_f)
sig_motifs = sig_motifs[sig_motifs["padj"] < 0.05]
print(len(sig_motifs))
sig_motifs.head()
# In[24]:
data = pd.read_table(data_f)
data.head()
# In[25]:
orth_expr = pd.read_table(orth_expr_f, sep="\t")
orth_expr.head()
# In[26]:
human_expr = pd.read_table(human_expr_f, sep="\t")
human_expr.head()
# In[27]:
mouse_expr = pd.read_table(mouse_expr_f, sep="\t")
mouse_expr.head()
# In[28]:
orth = pd.read_table(orth_f, sep="\t")
orth.head()
# ## 2. merge data to build model
# In[29]:
index_elem = index_elem[index_elem["name"].str.contains("EVO")]
index_elem.head()
# In[30]:
index_elem["tss_id"] = index_elem["name"].str.split("__", expand=True)[1]
index_elem["tss_tile_num"] = index_elem["name"].str.split("__", expand=True)[2]
index_elem.sample(5)
# In[31]:
index_human = index_elem[index_elem["name"].str.contains("HUMAN")]
index_mouse = index_elem[index_elem["name"].str.contains("MOUSE")]
index_mouse.sample(5)
# In[32]:
print(len(data))
data_elem = data.merge(index_human[["element", "tss_id", "tss_tile_num"]], left_on=["hg19_id", "tss_tile_num"],
right_on=["tss_id", "tss_tile_num"])
data_elem = data_elem.merge(index_mouse[["element", "tss_id", "tss_tile_num"]], left_on=["mm9_id", "tss_tile_num"],
right_on=["tss_id", "tss_tile_num"], suffixes=("_human", "_mouse"))
data_elem.drop(["tss_id_human", "tss_id_mouse"], axis=1, inplace=True)
print(len(data))
data_elem.head()
# In[33]:
data_elem["gc_human"] = data_elem.apply(calculate_gc, col="element_human", axis=1)
data_elem["gc_mouse"] = data_elem.apply(calculate_gc, col="element_mouse", axis=1)
data_elem["cpg_human"] = data_elem.apply(calculate_cpg, col="element_human", axis=1)
data_elem["cpg_mouse"] = data_elem.apply(calculate_cpg, col="element_mouse", axis=1)
data_elem.sample(5)
# In[34]:
data_elem.columns
# In[35]:
data_human = data_elem[["hg19_id", "tss_tile_num", "logFC_trans_human", "gc_human", "cpg_human", "HUES64_padj_hg19", "trans_status_one"]]
data_mouse = data_elem[["mm9_id", "tss_tile_num", "logFC_trans_mouse", "gc_mouse", "cpg_mouse", "mESC_padj_mm9", "trans_status_one"]]
data_human.columns = ["tss_id", "tss_tile_num", "logFC_trans", "gc", "cpg", "padj", "trans_status"]
data_mouse.columns = ["tss_id", "tss_tile_num", "logFC_trans", "gc", "cpg", "padj", "trans_status"]
data_indiv = data_human.append(data_mouse).drop_duplicates()
print(len(data_indiv))
data_indiv.head()
# ## 3. build reduced model
# In[36]:
scaled_features = StandardScaler().fit_transform(data_indiv[["logFC_trans", "gc", "cpg"]])
data_norm = | pd.DataFrame(scaled_features, index=data_indiv.index, columns=["logFC_trans", "gc", "cpg"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
ht_fail = pd.read_csv('/content/sample_data/heart failur classification dataset.csv')
ht_fail.head(5)
ht_fail.shape
ht_fail.isnull()
ht_fail.isnull().sum()
#Imputing missing values
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
impute.fit(ht_fail[['time']])
ht_fail['time'] = impute.transform(ht_fail[['time']])
ht_fail[['time']]
#Imputing missing values
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
impute.fit(ht_fail[['serum_sodium']])
ht_fail['serum_sodium'] = impute.transform(ht_fail[['serum_sodium']])
ht_fail[['serum_sodium']]
ht_fail.isnull().sum()
#Handling categorical features
#ht_fail.info
ht_fail
ht_fail['smoking'].unique()
ht_fail['smoking'] = ht_fail['smoking'].map({'No':0,'Yes':1})
ht_fail
ht_fail['sex'].unique()
ht_fail['sex'] = ht_fail['sex'].map({'Male':0,'Female':1})
ht_fail
#Train_Test Split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ht_fail.iloc[:, :-1], ht_fail.iloc[:,-1],random_state=1)
#SVM
from sklearn.svm import SVC
svc = SVC(kernel="linear")
svc.fit(x_train, y_train)
pre_score_svm = svc.score(x_test, y_test)
print("Training accuracy of the model is {:.2f}".format(svc.score(x_train, y_train)))
print("Testing accuracy of the model is {:.2f}".format(svc.score(x_test, y_test)))
predictions = svc.predict(x_test)
print(predictions)
#MLP
from sklearn.neural_network import MLPClassifier
nnc=MLPClassifier(hidden_layer_sizes=(7), activation="relu", max_iter=1000000)
nnc.fit(x_train, y_train)
pre_score_mlp = nnc.score(x_test, y_test)
print("The Training accuracy of the model is {:.2f}".format(nnc.score(x_train, y_train)))
print("The Testing accuracy of the model is {:.2f}".format(nnc.score(x_test, y_test)))
predictions = nnc.predict(x_test)
print(predictions)
#Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=50)
rfc.fit(x_train, y_train)
pre_score_rndmForest = rfc.score(x_test, y_test)
print("The Training accuracy of the model is {:.2f}".format(rfc.score(x_train, y_train)))
print("The Testing accuracy of the model is {:.2f}".format(rfc.score(x_test, y_test)))
predictions = rfc.predict(x_test)
print(predictions)
#performance without dimensionality reduction
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier(n_neighbors=4)
knn.fit(x_train, y_train)
print("Training accuracy is {:.2f}".format(knn.score(x_train, y_train)) )
print("Testing accuracy is {:.2f} ".format(knn.score(x_test, y_test)) )
htfail_origin = np.array(ht_fail.iloc[:, :-1])
htfail_origin_target = np.array(ht_fail.iloc[:,-1])
#dimensionality reduction
from sklearn.preprocessing import StandardScaler
scaler= StandardScaler()
htfail_df= pd.DataFrame(scaler.fit_transform(htfail_origin.data))
htfail_df=htfail_df.assign(target=htfail_origin_target)
#PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=7)
principal_components= pca.fit_transform(htfail_origin.data)
print(principal_components)
pca.explained_variance_ratio_
sum(pca.explained_variance_ratio_)
principal_df = pd.DataFrame(data=principal_components)
main_df= | pd.concat([principal_df, htfail_df[["target"]]], axis=1) | pandas.concat |
import pandas as pd
from koapy import KiwoomOpenApiContext
from koapy.backend.cybos.CybosPlusComObject import CybosPlusComObject
kiwoom = KiwoomOpenApiContext()
cybos = CybosPlusComObject()
kiwoom.EnsureConnected()
cybos.EnsureConnected()
kiwoom_codes = kiwoom.GetCommonCodeList()
cybos_codes = cybos.GetCommonCodeList()
cybos_codes = [code[1:] for code in cybos_codes]
kiwoom_codes = | pd.DataFrame(kiwoom_codes, columns=['code']) | pandas.DataFrame |
import scrapy
from bs4 import BeautifulSoup
import pandas as pd
import shelve
import os
import Notification
import json
def scrapHTML(html):
soup=BeautifulSoup(html,"html.parser")
tableRows=soup.find_all("tr")
# print(tableRows[1])
dataframe=[]
# [1:]
tableRows=tableRows[1:]
for tr in tableRows:
# print(tr)
# print('_'*100)
td=tr.find_all("td")
row = [cell.text for cell in td]
dataframe.append(row)
return dataframe
param1 = []
param2 = []
filename=''
cols=[]
with open("scraper_parameters.json",'r') as file:
parameters=json.loads(file.read())
param1 = parameters["allowed_domains"]
param2 = parameters["start_urls"]
filename = parameters["filename"]
cols = parameters["cols"]
class ScrapSpider(scrapy.Spider):
name = 'scrap'
allowed_domains = param1
start_urls = param2
def parse(self, response):
# pass
print('_'*100)
f=shelve.open("Query_Length")
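# the shelve file persists the row count from the previous scrape so a change in the number of scraped rows can be detected across runs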
scrapped=scrapHTML(response.body)
if not 'length' in f.keys():
# Initialize the query length
f['length']=-1
change=f['length']-len(scrapped)
if change!=0:
print("Change Happened!!!")
# Update
dataframe = | pd.DataFrame(scrapped, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : utils.py
# Modified : 17.02.2022
# By : <NAME> <<EMAIL>>
from collections import OrderedDict
import numpy as np
import os
from typing import List
import random
import cv2
from PIL import Image
import torch
import torchvision
from pathlib import Path
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from efficientnet_pytorch import EfficientNet
from torchvision import transforms
from torch.utils.data import Dataset
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score
import wandb
training_transforms = transforms.Compose([#Microscope(),
#AdvancedHairAugmentation(),
transforms.RandomRotation(30),
#transforms.RandomResizedCrop(256, scale=(0.8, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
#transforms.ColorJitter(brightness=32. / 255.,saturation=0.5,hue=0.01),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
testing_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# Creating seeds to make results reproducible
def seed_everything(seed_value):
np.random.seed(seed_value)
random.seed(seed_value)
torch.manual_seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed = 2022
seed_everything(seed)
def get_parameters(net, EXCLUDE_LIST) -> List[np.ndarray]:
parameters = []
for i, (name, tensor) in enumerate(net.state_dict().items()):
# print(f" [layer {i}] {name}, {type(tensor)}, {tensor.shape}, {tensor.dtype}")
# Check if this tensor should be included or not
exclude = False
for forbidden_ending in EXCLUDE_LIST:
if forbidden_ending in name:
exclude = True
if exclude:
continue
# Convert torch.Tensor to NumPy.ndarray
parameters.append(tensor.cpu().numpy())
return parameters
def set_parameters(net, parameters, EXCLUDE_LIST):
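# load the given NumPy arrays into the matching state_dict entries, skipping any parameter whose name contains an excluded pattern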
keys = []
for name in net.state_dict().keys():
# Check if this tensor should be included or not
exclude = False
for forbidden_ending in EXCLUDE_LIST:
if forbidden_ending in name:
exclude = True
if exclude:
continue
# Add to list of included keys
keys.append(name)
params_dict = zip(keys, parameters)
state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
net.load_state_dict(state_dict, strict=False)
class Net(nn.Module):
def __init__(self, arch, return_feats=False):
super(Net, self).__init__()
self.arch = arch
self.return_feats = return_feats
if 'fgdf' in str(arch.__class__):
self.arch.fc = nn.Linear(in_features=1280, out_features=500, bias=True)
if 'EfficientNet' in str(arch.__class__):
self.arch._fc = nn.Linear(in_features=self.arch._fc.in_features, out_features=500, bias=True)
#self.dropout1 = nn.Dropout(0.2)
else:
self.arch.fc = nn.Linear(in_features=arch.fc.in_features, out_features=500, bias=True)
self.output = nn.Linear(500, 1)
def forward(self, images):
"""
No sigmoid in forward because we are going to use BCEWithLogitsLoss
Which applies sigmoid for us when calculating a loss
"""
x = images
features = self.arch(x)
output = self.output(features)
if self.return_feats:
return features
return output
def load_model(model = 'efficientnet-b2', device="cuda"):
if "efficientnet" in model:
arch = EfficientNet.from_pretrained(model)
elif model == "googlenet":
arch = torchvision.models.googlenet(pretrained=True)
else:
arch = torchvision.models.resnet50(pretrained=True)
model = Net(arch=arch).to(device)
return model
def create_split(source_dir, n_b, n_m):
# Split synthetic dataset
input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if os.path.isfile(f)]
ind_0, ind_1 = [], []
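# bucket image indices by the class label encoded as the last character of the file name ('0' vs. anything else)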
for i, f in enumerate(input_images):
if f.split('.')[0][-1] == '0':
ind_0.append(i)
else:
ind_1.append(i)
train_id_list, val_id_list = ind_0[:round(len(ind_0)*0.8)], ind_0[round(len(ind_0)*0.8):] #ind_0[round(len(ind_0)*0.6):round(len(ind_0)*0.8)] ,
train_id_1, val_id_1 = ind_1[:round(len(ind_1)*0.8)], ind_1[round(len(ind_1)*0.8):] #ind_1[round(len(ind_1)*0.6):round(len(ind_1)*0.8)] ,
train_id_list = np.append(train_id_list, train_id_1)
val_id_list = np.append(val_id_list, val_id_1)
return train_id_list, val_id_list #test_id_list
def load_isic_by_patient(partition, path='/workspace/melanoma_isic_dataset'):
# Load data
df = pd.read_csv(os.path.join(path,'train_concat.csv'))
train_img_dir = os.path.join(path,'train/train/')
df['image_name'] = [os.path.join(train_img_dir, df.iloc[index]['image_name'] + '.jpg') for index in range(len(df))]
df["patient_id"] = df["patient_id"].fillna('nan')
# df.loc[df['patient_id'].isnull()==True]['target'].unique() # 337 rows melanomas
"""
# EXP 6: same bias/ratio same size - different BIASES
bias_df = pd.read_csv("/workspace/flower/bias_pseudoannotations_real_train_ISIC20.csv")
bias_df['image_name'] = [os.path.join(train_img_dir, bias_df.iloc[index]['image_name']) for index in range(len(bias_df))]
#bias_df = pd.merge(bias_df, df, how='inner', on=["image_name"])
target_groups = bias_df.groupby('target', as_index=False) # keep column target
df_ben = target_groups.get_group(0) # 32533 benign
df_mal = target_groups.get_group(1) # 5105 melanoma
# EXP 6
if partition == 0:
#FRAMES
df_b = df_ben.groupby('black_frame').get_group(1) # 687 with frame
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((1,0))[:323] # 2082 with frame
df = pd.concat([df_b, df_m]) # Use 1010 (32%mel) # TOTAL 2848 (75% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
# RULES
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,1)).head(1125) # 4717 with rules and no frames
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,1)).head(375) # 516 with rules and no frames
df = pd.concat([df_b, df_m]) # Use 1500 (25%mel) # TOTAL 5233 (10% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
# NONE
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,0)).head(1125) # 27129 without frames or rulers
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,0)).head(375) # 2507 without frames or rulers 14%
df = pd.concat([df_b, df_m]) # Use 1500 (25%mel) # TOTAL 29636 (8.4% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,0))[2000:5000] # 3000
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,0))[500:1500] # 1000 (30% M) T=4000
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
"""
# Split by Patient
patient_groups = df.groupby('patient_id') #37311
# Split by Patient and Class
melanoma_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if patient_groups.get_group(x)['target'].unique().all()==1] # 4188 - after adding ID na 4525
benign_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if 0 in patient_groups.get_group(x)['target'].unique()] # 2055 - 33123
np.random.shuffle(melanoma_groups_list)
np.random.shuffle(benign_groups_list)
# EXP 5: same bias/ratio different size - simulate regions
if partition == 0:
df_b = pd.concat(benign_groups_list[:270]) # 4253
df_m = pd.concat(melanoma_groups_list[:350]) # 1029 (19.5% melanomas) T=5282
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
df_b = pd.concat(benign_groups_list[270:440]) # 2881
df_m = pd.concat(melanoma_groups_list[350:539]) # 845 (22.6% melanomas) T=3726
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
df_b = pd.concat(benign_groups_list[440:490]) # 805
df_m = pd.concat(melanoma_groups_list[539:615]) # 194 (19.4% melanomas) T=999
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 3:
df_b = pd.concat(benign_groups_list[490:511]) # 341
df_m = pd.concat(melanoma_groups_list[615:640]) # 87 (20% melanomas) T=428
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 4:
df_b = pd.concat(benign_groups_list[515:520]) # 171
df_m = pd.concat(melanoma_groups_list[640:656]) # 47 (21.5% melanomas) T=218
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = pd.concat(benign_groups_list[520:720]) # 3531
df_m = pd.concat(melanoma_groups_list[700:1100]) # 1456 (29% M) T=4987
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
"""
# EXP 4: same size (1.5k) different ratio b/m
if partition == 1:
df_b = pd.concat(benign_groups_list[:75]) # 1118
df_m = pd.concat(melanoma_groups_list[:90]) # 499 (30.8% melanomas) T=1617
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
df_b = pd.concat(benign_groups_list[75:185]) # 1600
df_m = pd.concat(melanoma_groups_list[90:95]) # 17 (1% melanomas) T=1617
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 0:
df_b = pd.concat(benign_groups_list[185:191]) # 160
df_m = pd.concat(melanoma_groups_list[150:550]) # 1454 (90% melanomas) T=1614
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = pd.concat(benign_groups_list[500:700]) # 3630
df_m = pd.concat(melanoma_groups_list[600:1100]) # 1779 (33% M) T=5409
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
# EXP 3
if partition == 2:
df_b = pd.concat(benign_groups_list[:90]) # 1348
df_m = pd.concat(melanoma_groups_list[:60]) # 172 (11.3% melanomas) T=1520
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
df_b = pd.concat(benign_groups_list[90:150]) # 937
df_m = pd.concat(melanoma_groups_list[60:90]) # 99 (10% melanomas) T=1036
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 0:
df_b = pd.concat(benign_groups_list[150:170]) # 246
df_m = pd.concat(melanoma_groups_list[90:300]) # 626 (72% melanomas) T=872
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = pd.concat(benign_groups_list[170:370]) # 3343
df_m = pd.concat(melanoma_groups_list[300:1000]) # 2603
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
#EXP 2
if partition == 2:
df_b_test = pd.concat(benign_groups_list[1800:]) # 4462
df_b_train = pd.concat(benign_groups_list[800:1800]) # 16033 - TOTAL 20495 samples
df_m_test = pd.concat(melanoma_groups_list[170:281]) # 340
df_m_train = pd.concat(melanoma_groups_list[281:800]) # 1970 - TOTAL: 2310 samples
elif partition == 1:
df_b_test = pd.concat(benign_groups_list[130:250]) # 1949
df_b_train = pd.concat(benign_groups_list[250:800]) # 8609 - TOTAL 10558 samples
df_m_test = pd.concat(melanoma_groups_list[1230:]) # 303
df_m_train = pd.concat(melanoma_groups_list[800:1230]) # 1407 - TOTAL 1710 samples
else:
df_b_test = pd.concat(benign_groups_list[:30]) # 519
df_b_train = pd.concat(benign_groups_list[30:130]) # 1551 - TOTAL: 2070 samples
df_m_test = pd.concat(melanoma_groups_list[:70]) # 191
df_m_train = pd.concat(melanoma_groups_list[70:170]) # 314 - TOTAL: 505 samples
train_split = pd.concat([df_b_train, df_m_train])
valid_split = pd.concat([df_b_test, df_m_test])
"""
train_df=pd.DataFrame(train_split)
validation_df=pd.DataFrame(valid_split)
num_examples = {"trainset" : len(train_df), "testset" : len(validation_df)}
return train_df, validation_df, num_examples
def load_isic_by_patient_server( path='/workspace/melanoma_isic_dataset'):
# Load data
df = pd.read_csv(os.path.join(path,'train_concat.csv'))
train_img_dir = os.path.join(path,'train/train/')
df['image_name'] = [os.path.join(train_img_dir, df.iloc[index]['image_name'] + '.jpg') for index in range(len(df))]
df["patient_id"] = df["patient_id"].fillna('nan')
# df.loc[df['patient_id'].isnull()==True]['target'].unique() # 337 rows melanomas
# Split by Patient
patient_groups = df.groupby('patient_id') #37311
melanoma_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if patient_groups.get_group(x)['target'].unique().all()==1] # 4188 - after adding na 4525
benign_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if 0 in patient_groups.get_group(x)['target'].unique()] # 2055 - 33123
np.random.shuffle(melanoma_groups_list)
np.random.shuffle(benign_groups_list)
df_b_test = pd.concat(benign_groups_list[1800:]) # 4462
df_b_train = pd.concat(benign_groups_list[800:1800]) # 16033 - TOTAL 20495 samples
df_m_test = pd.concat(melanoma_groups_list[170:281]) # 340
df_m_train = pd.concat(melanoma_groups_list[281:800]) # 1970 - TOTAL: 2310 samples
train_split1 = pd.concat([df_b_train, df_m_train])
valid_split1 = pd.concat([df_b_test, df_m_test])
df_b_test = pd.concat(benign_groups_list[130:250]) # 1949
    df_b_train = pd.concat(benign_groups_list[250:800])
import dash
from dash import dcc
import dash_bootstrap_components as dbc
from dash import html
from dash.dependencies import Input, Output, State
import pandas as pd
import random
import re
#######################
# Helper functions
#######################
# # convert a dataframe into a dict where each item is another dict corresponding
# # to a row of the html table
def make_table(df):
# table header
rows = [html.Tr([html.Th(col) for col in list(df.columns)])]
# loop through each unique filename and create a list of the Html objects to make that row
for r in range(len(df.index)):
row = [html.Th(df.iloc[r,c]) for c in range(len(df.columns))]
rows.append(html.Tr(row))
return rows
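# Illustrative usage (hypothetical data): make_table returns a list of html.Tr
# rows that can be passed directly as the children of an html.Table, e.g.
#   demo_df = pd.DataFrame({'Pick': [1, 2], 'Player': ['Player A', 'Player B']})
#   html.Table(make_table(demo_df), className='table')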
def get_auto_picks(start_pick,end_pick,pl,n_teams,roster):
randweights = [0]*25+[1]*9+[2]*5+[3]*3+[4]*2+[5]*2+[6]+[7]+[8]+[9]
for pick_number in range(start_pick,end_pick):
# determine team needs
team = (teamnames[:n_teams+1]+teamnames[n_teams:0:-1])[pick_number % (2*n_teams)]
pln = remove_unneeded_players(pl, roster, team)
# use randomness to determine which player will be selected
        pick_no = randweights[random.randrange(0, len(randweights))]
pick_idx = pln.sort_values('Rank',ascending=True).index[pick_no]
pos= pl.loc[pick_idx,'Position(s)']
# update players table
pl.loc[pick_idx,'Available'] = False
pl.loc[pick_idx,'Rd'] = (pick_number-1) // n_teams + 1
pl.loc[pick_idx,'Pick'] = (pick_number-1) % n_teams + 1
pl.loc[pick_idx,'Slot'] = determine_slot(pos,roster,pl.loc[pl.Team == team])
pl.loc[pick_idx,'Team'] = team
return pl
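# Worked example of the snake-draft order above (hypothetical 4-team league):
# teamnames[:5] + teamnames[4:0:-1] == 'AABCD' + 'DCBA' == 'AABCDDCBA', so
# pick_number % 8 maps picks 1..8 to teams A, B, C, D, D, C, B, A.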
def determine_slot(pos, ros, teampl):
m = ros.merge(teampl,on='Slot',how='left')
# add alternative positions
altpos = (['MI'] if '2B' in pos or 'SS' in pos else []) + (
['CI'] if '1B' in pos or '3B' in pos else []) + ['UT','BE']
for p in pos.split(', ') + altpos:
for a in m.loc[m.Player.isna()].sort_values('Num')['Slot']:
            if p == re.sub(r'\d$', '', a):
return a
else:
return '-'
def remove_unneeded_players(pl,roster,team):
# Remove the players from pl that team doesn't need based on roster
teampl = pl.loc[pl.Team == team]
teamros = roster.merge(teampl,on = 'Slot',how='left')
    needs = list(teamros.loc[teamros.Player.isna(), 'Slot'].str.replace(r'\d+$', '', regex=True))
# handle MI and CI
if 'MI' in needs:
needs = needs + ['SS','2B']
if 'CI' in needs:
needs = needs + ['1B','3B']
# filter players that don't match roster needs
if ('BE' not in needs) and ('UT' not in needs):
return pl.loc[pl['Position(s)'].str.match('|'.join(needs)) & pl['Available']]
else:
return pl.loc[pl['Available']]
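# Illustrative behaviour (hypothetical league state): while a team still has an
# open BE or UT slot every available player is returned; once only e.g. 'SS1'
# and 'OF2' remain open, the pool is filtered to available players whose
# Position(s) string matches 'SS|OF'.
#   remove_unneeded_players(players, roster, 'My-Team')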
#######################
# Initial Data Prep
#######################
players = pd.read_csv('players.csv')
players['Team'], players['Slot'], players['Rd'], players['Pick'] = (pd.NA, pd.NA, pd.NA, pd.NA)
teamnames = 'AABCDEFGHIJKLMNOPQRSTUVWXYZ'
#######################
# Dash app layout
#######################
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
# header for the app
header = [dbc.Row(html.H1('Draft Simulator')),
dbc.Row(html.Div(' ',style = {'height': "35px"}))
]
startsection = [
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(id='n-p-dropdown',options=list(range(5,16)),value=9),
html.Div(children='# of Pitchers')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-of-dropdown',options=list(range(3,8)),value=3),
html.Div(children='# of Outfielders')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-c-dropdown',options=list(range(1,4)),value=1),
html.Div(children='# of Catchers')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-ci-dropdown',options=list(range(0,6)),value=1),
html.Div(children='# of Corner IF')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-mi-dropdown',options=list(range(0,6)),value=1),
html.Div(children='# of Middle IF')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-ut-dropdown',options=list(range(0,21)),value=2),
html.Div(children='# of Utility Players')
],style = {'width':'90%'}), md=1),
dbc.Col(
html.Div([
dcc.Dropdown(id='n-be-dropdown',options=list(range(0,21)),value=2),
html.Div(children='# of Bench Players')
],style = {'width':'15%'}), md=6)
],id = 'start-row-1'),
dbc.Row(html.Div(' ',style = {'height': "25px"})),
dbc.Row([
dbc.Col(
html.Div([
dcc.Dropdown(id='n-teams-dropdown',options=list(range(2,25)),value=10),
html.Div(children='Select number of teams')
],style = {'width':'75%'}), md=2),
dbc.Col(
html.Div([
dcc.Dropdown(id='position-dropdown'),
html.Div(children='Select your draft position')
],style = {'width':'75%'}), md=2),
dbc.Col(html.Button('Begin!',id='begin-button',style={'width': '25%'}),md=8)
],id = 'start-row-2')
]
# put the table of the sorted data in the left half of the screen
draftpanel = [
html.Div([
html.Div([
html.H3('Select Player'),
dbc.Row([
dbc.Col([
dcc.Dropdown(options = players.Rank.astype(str)+'. '+players.Name+' ('+players['Position(s)']+')'
,id = 'pick-dropdown'),
html.Button('Draft Player', id='draft-button', n_clicks=0)],md=5),
dbc.Col([
html.Table(make_table(pd.DataFrame({})),id='bat-proj-table',className='table'),
html.Table(make_table(pd.DataFrame({})),id='pit-proj-table',className='table')],md=7)
]),
html.Div(' ',style={'height':'20px'})
],id = 'draft-div'),
html.H3('Team Roster'),
dcc.Dropdown(id='team-roster-dropdown',options=['My-Team'], value = 'My-Team'),
html.Table(make_table(pd.DataFrame({})),id='roster-table',className='table')
],id='draft-panel',style={"width": "90%"})
]
pickspanel = [
html.Div([
html.H3('Last Picks'),
html.Table(make_table(pd.DataFrame({})),id='last-picks-table',className='table'),
html.Div(players.to_json(),id='players',style={'display': 'none'}),
html.Div(0,id='n-teams',style={'display': 'none'}),
html.Div(0,id='position',style={'display': 'none'}),
html.Div(0,id='pick-number',style={'display': 'none'}),
html.Div(0,id='roster',style={'display': 'none'})
],style = {"width": "90%"})
]
projpanel = [
html.Div([
html.H3('Projected Standings'),
dcc.RadioItems(['Stats','Ranks'],'Stats',id='proj-type-radioitems',style = {'width':'200%'}),
html.Table(make_table(pd.DataFrame({})),id='proj-standings-table',className='table')
])
]
# lay out the app based on the above panel definitions
app.layout = dbc.Container([
html.Div(header),
html.Div(startsection,id ='start-section'),
html.Div(dbc.Row([dbc.Col(draftpanel, md=5),
dbc.Col(projpanel, md=5),
dbc.Col(pickspanel, md=2)])
,id = 'main-section',style = {'display':'none'})
],fluid=True)
# #######################
# # Reactive callbacks
# #######################
@app.callback(
Output('roster','children'),
[Input('n-of-dropdown','value'),
Input('n-p-dropdown','value'),
Input('n-c-dropdown','value'),
Input('n-mi-dropdown','value'),
Input('n-ci-dropdown','value'),
Input('n-ut-dropdown','value'),
Input('n-be-dropdown','value'),
Input('begin-button','n_clicks')]
)
def update_roster(n_of,n_p,n_c,n_mi,n_ci,n_ut,n_be,n_clicks):
slots = (['C'+str(i+1) for i in range(n_c)] +
['1B','2B','3B','SS'] +
['OF'+str(i+1) for i in range(n_of)] +
['MI'+str(i+1) for i in range(n_mi)] +
['CI'+str(i+1) for i in range(n_ci)] +
['P'+str(i+1) for i in range(n_p)] +
['UT'+str(i+1) for i in range(n_ut)] +
['BE'+str(i+1) for i in range(n_be)])
roster = pd.DataFrame({'Slot': slots,'Num': list(range(len(slots)))})
return roster.to_json()
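# Example of the resulting slot list for the default dropdown values
# (1 C, 3 OF, 1 MI, 1 CI, 9 P, 2 UT, 2 BE):
#   ['C1', '1B', '2B', '3B', 'SS', 'OF1', 'OF2', 'OF3', 'MI1', 'CI1',
#    'P1', ..., 'P9', 'UT1', 'UT2', 'BE1', 'BE2']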
@app.callback(
Output('position-dropdown', 'options'),
[Input('n-teams-dropdown', 'value')]
)
def update_position_dropdown(num_teams):
return list(range(1,num_teams+1))
@app.callback(
[Output('pick-dropdown','options')],
[Input('players','children'),
Input('roster','children')]
)
def update_pick_options(players_json,roster_json):
pl = pd.read_json(players_json)
roster = pd.read_json(roster_json)
pln = remove_unneeded_players(pl, roster, 'My-Team')
return [list(pln.Rank.astype(str)+'. '+pln.Player+' ('+pln['Position(s)']+')')]
@app.callback(
Output('last-picks-table', 'children'),
[Input('players','children')],
[State('n-teams','children')]
)
def update_last_picks_table(players_json,n_teams):
pl = pd.read_json(players_json)
    last_picks = pl.loc[~pl.Team.isna()].copy()
last_picks['Pick'] = (last_picks['Rd']-1)*n_teams + last_picks['Pick']
last_picks.loc[last_picks.Team == 'My-Team','Team'] = 'Me'
return make_table(last_picks.sort_values('Pick',ascending = False)
[['Pick','Team','Player']].iloc[0:3*n_teams])
@app.callback(
Output('roster-table', 'children'),
[Input('players','children'),
Input('team-roster-dropdown','value')],
[State('roster','children')]
)
def update_roster_table(players_json,teamchoice,roster_json):
ros = pd.read_json(roster_json)
pl = pd.read_json(players_json)
pl['AVG'] = (pl['H']/pl['AB']).round(3)
pl['ERA'] = (9*pl['ER']/pl['IP']).round(2)
pl['WHIP'] = ((pl['BB']+pl['H.P'])/pl['IP']).round(2)
teampl = pl.loc[pl.Team == teamchoice]
retcols = ['Slot','Player','Rd','AB','R','HR','RBI','SB','AVG',
'IP', 'ERA', 'W', 'SO', 'SV', 'WHIP']
ret = ros.merge(teampl,on='Slot',how='left').sort_values('Num')
return make_table(ret[retcols])
@app.callback(
Output('bat-proj-table', 'children'),
[Input('pick-dropdown','value')],
[State('players','children')]
)
def update_bat_proj_table(pick,players_json):
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pl['AVG'] = (pl['H']/pl['AB']).round(3)
if pl.loc[pick_idx,['AB']].count() > 0:
return make_table(pl.loc[[pick_idx],['AB', 'R', 'HR', 'RBI', 'SB','AVG']])
else:
return make_table(pd.DataFrame({}))
@app.callback(
Output('pit-proj-table', 'children'),
[Input('pick-dropdown','value')],
[State('players','children')]
)
def update_pit_proj_table(pick,players_json):
pl = pd.read_json(players_json)
pickrank = int(pick.split('.')[0])
pick_idx = pl.loc[pl.Rank == pickrank].index[0]
pl['WHIP'] = ((pl['BB']+pl['H.P'])/pl['IP']).round(2)
pl['ERA'] = (9*pl['ER']/pl['IP']).round(2)
if pl.loc[pick_idx,['IP']].count() > 0:
return make_table(pl.loc[[pick_idx],['IP', 'ERA', 'W', 'SO', 'SV', 'WHIP']])
else:
return make_table(pd.DataFrame({}))
@app.callback(
Output('proj-standings-table','children'),
[Input('players','children'),
Input('proj-type-radioitems','value')]
)
def update_proj_standings(players_json,proj_type):
df = pd.read_json(players_json)
dfg=df.groupby('Team')[['AB', 'H', 'R', 'HR', 'RBI', 'SB', 'IP', 'ER', 'W',
'SO', 'SV', 'H.P','BB']].sum().reset_index().sort_values('Team')
dfg['AVG'] = (dfg['H']/dfg['AB']).round(3)
dfg['ERA'] = (9*dfg['ER']/dfg['IP']).round(2)
dfg['WHIP'] = ((dfg['BB']+dfg['H.P'])/dfg['IP']).round(2)
ranks = {'Team':dfg.Team}
for m in ['R', 'HR', 'RBI', 'SB','AVG', 'W','SO', 'SV']:
ranks.update({m: dfg[m].rank(ascending=False)})
for m in ['ERA','WHIP']:
ranks.update({m: dfg[m].rank()})
rdf = pd.DataFrame(ranks,index=dfg.index)
    rdf['Score'] = rdf.sum(axis=1, numeric_only=True)
if proj_type == 'Ranks':
return make_table(rdf.sort_values('Score'))
else:
dfg['Score'] = rdf.Score
return make_table(dfg[rdf.columns].sort_values('Score'))
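# Worked example of the rank-sum scoring above (hypothetical 3-team league,
# two categories only):
#   HR ranks  -> {'A': 1, 'B': 2, 'C': 3}   # more HR  = better (rank 1)
#   ERA ranks -> {'A': 2, 'B': 1, 'C': 3}   # lower ERA = better (rank 1)
#   Score     -> {'A': 3, 'B': 3, 'C': 6}   # standings sort ascending on Score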
@app.callback(
[Output('n-teams','children'),
Output('position','children'),
Output('pick-number','children'),
Output('players','children'),
Output('begin-button','n_clicks'),
Output('draft-button','n_clicks'),
Output('team-roster-dropdown','options'),
Output('main-section','style'),
Output('start-section','style')],
[Input('begin-button', 'n_clicks'),
Input('n-teams-dropdown','value'),
Input('position-dropdown','value'),
Input('draft-button','n_clicks'),
Input('pick-dropdown','value')],
[State('n-teams','children'),
State('position','children'),
State('pick-number','children'),
State('team-roster-dropdown','options'),
State('main-section','style'),
State('start-section','style'),
State('players','children'),
State('roster','children')]
)
def update_data(begin_clicks,n_teams,position,draft_clicks,pick,
prev_n_teams,prev_position,pick_number,prev_opts,
prev_style1,prev_style2,players_json,roster_json):
if begin_clicks is not None:
# prepare data frames
pl = pd.read_json(players_json)
ros = pd.read_json(roster_json)
# initial autopicks
pl = get_auto_picks(1, position, pl, n_teams, ros)
# list of team names
opts = ['My-Team'] + [teamnames[i] for i in range(1,n_teams+1) if i != position]
return (n_teams, position, position, pl.to_json(),
None, None, opts, {'display':'block'}, {'display':'none'})
elif draft_clicks is not None:
        pl = pd.read_json(players_json)
from typing import Any, Dict, Tuple, Union, Mapping, Optional, Sequence
from typing_extensions import Literal
from enum import auto
from types import MappingProxyType
from pathlib import Path
from datetime import datetime
from anndata import AnnData
from cellrank import logging as logg
from cellrank._key import Key
from cellrank.tl._enum import ModeEnum
from cellrank.ul._docs import d
from cellrank.tl._utils import (
save_fig,
_eigengap,
_fuzzy_to_discrete,
_series_from_one_hot_matrix,
)
from cellrank.tl._colors import _get_black_or_white, _create_categorical_colors
from cellrank.tl._lineage import Lineage
from cellrank.tl.estimators._utils import SafeGetter
from cellrank.tl.estimators.mixins import EigenMixin, SchurMixin, LinDriversMixin
from cellrank.tl.kernels._base_kernel import KernelExpression
from cellrank.tl.estimators.mixins._utils import logger, shadow, register_plotter
from cellrank.tl.estimators.terminal_states._term_states_estimator import (
TermStatesEstimator,
)
import numpy as np
import pandas as pd
from scipy.sparse import spmatrix
from pandas.api.types import infer_dtype, is_categorical_dtype
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.colors import Normalize, ListedColormap
from matplotlib.ticker import StrMethodFormatter
from matplotlib.colorbar import ColorbarBase
class TermStatesMethod(ModeEnum): # noqa: D101
EIGENGAP = auto()
EIGENGAP_COARSE = auto()
TOP_N = auto()
STABILITY = auto()
@d.dedent
class GPCCA(TermStatesEstimator, LinDriversMixin, SchurMixin, EigenMixin):
"""
Generalized Perron Cluster Cluster Analysis :cite:`reuter:18` as implemented in \
`pyGPCCA <https://pygpcca.readthedocs.io/en/latest/>`_.
Coarse-grains a discrete Markov chain into a set of macrostates and computes coarse-grained transition probabilities
among the macrostates. Each macrostate corresponds to an area of the state space, i.e. to a subset of cells. The
assignment is soft, i.e. each cell is assigned to every macrostate with a certain weight, where weights sum to
one per cell. Macrostates are computed by maximizing the 'crispness' which can be thought of as a measure for
minimal overlap between macrostates in a certain inner-product sense. Once the macrostates have been computed,
we project the large transition matrix onto a coarse-grained transition matrix among the macrostates via
a Galerkin projection. This projection is based on invariant subspaces of the original transition matrix which
are obtained using the real Schur decomposition :cite:`reuter:18`.
Parameters
----------
%(base_estimator.parameters)s
"""
def __init__(
self,
obj: Union[AnnData, np.ndarray, spmatrix, KernelExpression],
obsp_key: Optional[str] = None,
**kwargs: Any,
):
super().__init__(obj=obj, obsp_key=obsp_key, **kwargs)
self._coarse_init_dist: Optional[pd.Series] = None
self._coarse_stat_dist: Optional[pd.Series] = None
self._coarse_tmat: Optional[pd.DataFrame] = None
self._macrostates: Optional[pd.Series] = None
self._macrostates_memberships: Optional[Lineage] = None
self._macrostates_colors: Optional[np.ndarray] = None
self._term_states_memberships: Optional[Lineage] = None
@property
@d.get_summary(base="gpcca_macro")
def macrostates(self) -> Optional[pd.Series]:
"""Macrostates of the transition matrix."""
return self._macrostates
@property
@d.get_summary(base="gpcca_macro_memberships")
def macrostates_memberships(self) -> Optional[Lineage]:
"""Macrostate membership matrix.
Soft assignment of microstates (cells) to macrostates.
"""
return self._macrostates_memberships
@property
@d.get_summary(base="gpcca_term_states_memberships")
def terminal_states_memberships(self) -> Optional[Lineage]:
"""Terminal state membership matrix.
Soft assignment of cells to terminal states.
"""
return self._term_states_memberships
@property
@d.get_summary(base="gpcca_coarse_init")
def coarse_initial_distribution(self) -> Optional[pd.Series]:
"""Coarse-grained initial distribution."""
return self._coarse_init_dist
@property
@d.get_summary(base="gpcca_coarse_stat")
def coarse_stationary_distribution(self) -> Optional[pd.Series]:
"""Coarse-grained stationary distribution."""
return self._coarse_stat_dist
@property
@d.get_summary(base="gpcca_coarse_tmat")
def coarse_T(self) -> Optional[pd.DataFrame]:
"""Coarse-grained transition matrix."""
return self._coarse_tmat
@d.get_sections(base="gpcca_compute_macro", sections=["Parameters", "Returns"])
@d.dedent
def compute_macrostates(
self,
n_states: Optional[Union[int, Sequence[int]]] = None,
n_cells: Optional[int] = 30,
cluster_key: Optional[str] = None,
**kwargs: Any,
) -> None:
"""
Compute the macrostates.
Parameters
----------
n_states
Number of macrostates. If a :class:`typing.Sequence`, use the *minChi* criterion :cite:`reuter:18`.
If `None`, use the *eigengap* heuristic.
%(n_cells)s
cluster_key
If a key to cluster labels is given, names and colors of the states will be associated with the clusters.
kwargs
Keyword arguments for :meth:`compute_schur`.
Returns
-------
Nothing, just updates the following fields:
- :attr:`macrostates` - %(gpcca_macro.summary)s
- :attr:`macrostates_memberships` - %(gpcca_macro_memberships.summary)s
- :attr:`coarse_T` - %(gpcca_coarse_tmat.summary)s
- :attr:`coarse_initial_distribution` - %(gpcca_coarse_init.summary)s
- :attr:`coarse_stationary_distribution` - %(gpcca_coarse_stat.summary)s
- :attr:`schur_vectors` - %(schur_vectors.summary)s
- :attr:`schur_matrix` - %(schur_matrix.summary)s
- :attr:`eigendecomposition` - %(eigen.summary)s
"""
n_states = self._n_states(n_states)
if n_states == 1:
self._compute_one_macrostate(
n_cells=n_cells,
cluster_key=cluster_key,
)
return
if self._gpcca is None or kwargs:
self.compute_schur(n_states, **kwargs)
n_states = self._validate_n_states(n_states)
if self._gpcca._p_X.shape[1] < n_states:
# precomputed X
logg.warning(
f"Requested more macrostates `{n_states}` than available "
f"Schur vectors `{self._gpcca._p_X.shape[1]}`. Recomputing the decomposition"
)
start = logg.info(f"Computing `{n_states}` macrostates")
try:
self._gpcca = self._gpcca.optimize(m=n_states)
except ValueError as e:
if "will split complex conjugate eigenvalues" not in str(e):
raise
# this is the following case - we have 4 Schur vectors, user requests 5 states, but it splits the conj. ev.
# in the try block, Schur decomposition with 5 vectors is computed, but it fails (no way of knowing)
# so in this case, we increase it by 1
logg.warning(
f"Unable to compute macrostates with `n_states={n_states}` because it will "
f"split complex conjugate eigenvalues. Using `n_states={n_states + 1}`"
)
self._gpcca = self._gpcca.optimize(m=n_states + 1)
self._set_macrostates(
memberships=self._gpcca.memberships,
n_cells=n_cells,
cluster_key=cluster_key,
params=self._create_params(),
time=start,
)
@d.dedent
def predict(
self,
method: Literal[
"stability", "top_n", "eigengap", "eigengap_coarse"
] = TermStatesMethod.STABILITY,
n_cells: int = 30,
alpha: Optional[float] = 1,
stability_threshold: float = 0.96,
n_states: Optional[int] = None,
) -> None:
"""
Automatically select terminal states from macrostates.
Parameters
----------
method
How to select the terminal states. Valid option are:
- `'eigengap'` - select the number of states based on the *eigengap* of :attr:`transition_matrix`.
- `'eigengap_coarse'` - select the number of states based on the *eigengap* of the diagonal
of :attr:`coarse_T`.
- `'top_n'` - select top ``n_states`` based on the probability of the diagonal of :attr:`coarse_T`.
- `'stability'` - select states which have a stability >= ``stability_threshold``.
The stability is given by the diagonal elements of :attr:`coarse_T`.
%(n_cells)s
alpha
Weight given to the deviation of an eigenvalue from one.
Only used when ``method = 'eigengap'`` or ``method = 'eigengap_coarse'``.
stability_threshold
Threshold used when ``method = 'stability'``.
n_states
Number of states used when ``method = 'top_n'``.
Returns
-------
Nothing, just updates the following fields:
- :attr:`terminal_states` - %(tse_term_states.summary)s
- :attr:`terminal_states_memberships` - %(gpcca_term_states_memberships.summary)s
- :attr:`terminal_states_probabilities` - %(tse_term_states_probs.summary)s
"""
if self.macrostates is None:
raise RuntimeError("Compute macrostates first as `.compute_macrostates()`.")
# fmt: off
if len(self._macrostates.cat.categories) == 1:
logg.warning("Found only one macrostate. Making it the single terminal state")
self.set_terminal_states_from_macrostates(None, n_cells=n_cells, params=self._create_params())
return
method = TermStatesMethod(method)
eig = self.eigendecomposition
coarse_T = self.coarse_T
if method == TermStatesMethod.EIGENGAP:
if eig is None:
raise RuntimeError("Compute eigendecomposition first as `.compute_eigendecomposition()`.")
n_states = _eigengap(eig["D"], alpha=alpha) + 1
elif method == TermStatesMethod.EIGENGAP_COARSE:
if coarse_T is None:
raise RuntimeError("Compute macrostates first as `.compute_macrostates()`.")
            n_states = _eigengap(np.sort(np.diag(coarse_T))[::-1], alpha=alpha)
elif method == TermStatesMethod.TOP_N:
if n_states is None:
raise ValueError("Expected `n_states != None` for `method='top_n'`.")
elif n_states <= 0:
raise ValueError(f"Expected `n_states` to be positive, found `{n_states}`.")
elif method == TermStatesMethod.STABILITY:
if stability_threshold is None:
raise ValueError("Expected `stability_threshold != None` for `method='stability'`.")
stability = pd.Series(np.diag(coarse_T), index=coarse_T.columns)
names = stability[stability.values >= stability_threshold].index
self.set_terminal_states_from_macrostates(names, n_cells=n_cells, params=self._create_params())
return
else:
raise NotImplementedError(f"Method `{method}` is not yet implemented.")
# fmt: on
names = coarse_T.columns[np.argsort(np.diag(coarse_T))][-n_states:]
self.set_terminal_states_from_macrostates(
names, n_cells=n_cells, params=self._create_params()
)
return
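    # Typical end-to-end usage (illustrative; argument values are examples only):
    #   g = GPCCA(kernel)
    #   g.compute_schur(20)
    #   g.compute_macrostates(n_states=[3, 8], cluster_key="clusters")
    #   g.predict(method="stability", stability_threshold=0.96)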
@d.dedent
def set_terminal_states_from_macrostates(
self,
names: Optional[Union[str, Sequence[str], Mapping[str, str]]] = None,
n_cells: int = 30,
**kwargs: Any,
) -> None:
"""
Manually select terminal states from macrostates.
Parameters
----------
names
Names of the macrostates to be marked as terminal. Multiple states can be combined using `','`,
such as ``["Alpha, Beta", "Epsilon"]``. If a :class:`dict`, keys correspond to the names
of the macrostates and the values to the new names. If `None`, select all macrostates.
%(n_cells)s
Returns
-------
Nothing, just updates the following fields:
- :attr:`terminal_states` - %(tse_term_states.summary)s
- :attr:`terminal_states_probabilities` - %(tse_term_states_probs.summary)s
- :attr:`terminal_states_probabilities_memberships` - %(gpcca_term_states_memberships.summary)s
"""
if n_cells <= 0:
raise ValueError(f"Expected `n_cells` to be positive, found `{n_cells}`.")
memberships = self.macrostates_memberships
if memberships is None:
raise RuntimeError("Compute macrostates first as `.compute_macrostates()`.")
rename = True
if names is None:
names = memberships.names
rename = False
if isinstance(names, str):
names = [names]
rename = False
if not isinstance(names, dict):
names = {n: n for n in names}
rename = False
if not len(names):
raise ValueError("No macrostates have been selected.")
# we do this also here because if `rename_terminal_states` fails
# invalid states would've been written to this object and nothing to adata
names = {str(k): str(v) for k, v in names.items()}
names_after_renaming = {names.get(n, n) for n in memberships.names}
if len(names_after_renaming) != memberships.shape[1]:
raise ValueError(
f"After renaming, terminal state names will no longer be unique: `{names_after_renaming}`."
)
# this also checks that the names are correct before renaming
is_singleton = memberships.shape[1] == 1
memberships = memberships[list(names.keys())].copy()
states = self._create_states(memberships, n_cells=n_cells, check_row_sums=False)
if is_singleton:
colors = self._macrostates_colors.copy()
probs = memberships.X.squeeze() / memberships.X.max()
else:
colors = memberships[list(states.cat.categories)].colors
probs = (memberships.X / memberships.X.max(0)).max(1)
probs = pd.Series(probs, index=self.adata.obs_names)
self._write_terminal_states(
states, colors, probs, memberships, params=kwargs.pop("params", {})
)
if rename:
# TODO(michalk8): in a future PR, remove this behavior in Lineage
# access lineage renames join states, e.g. 'Alpha, Beta' becomes 'Alpha or Beta' + whitespace stripping
self.rename_terminal_states(
dict(zip(self.terminal_states.cat.categories, names.values()))
)
@d.dedent
def rename_terminal_states(self, new_names: Mapping[str, str]) -> None:
"""
%(tse_rename_term_states.full_desc)s
Parameters
----------
%(tse_rename_term_states.parameters)s
Returns
-------
%(tse_rename_term_states.returns)s
- :attr:`terminal_states_memberships` - %(gpcca_term_states_memberships.summary)s
""" # noqa: D400
term_states_memberships = self.terminal_states_memberships
super().rename_terminal_states(new_names)
# fmt: off
new_names = {str(k): str(v) for k, v in new_names.items()}
term_states_memberships.names = [new_names.get(n, n) for n in term_states_memberships.names]
self._set("_term_states_memberships", value=term_states_memberships, shadow_only=True)
# fmt: on
with self._shadow:
key = Key.obsm.memberships(Key.obs.macrostates(self.backward))
self._set(obj=self.adata.obsm, key=key, value=term_states_memberships)
@d.dedent
def fit(
self,
n_states: Optional[Union[int, Sequence[int]]] = None,
n_cells: Optional[int] = 30,
cluster_key: Optional[str] = None,
**kwargs: Any,
) -> "GPCCA":
"""
Prepare self for terminal states prediction.
Parameters
----------
%(gpcca_compute_macro.parameters)s
Returns
-------
%(gpcca_compute_macro.returns)s
"""
if n_states is None:
self.compute_eigendecomposition()
n_states = self.eigendecomposition["eigengap"] + 1
if isinstance(n_states, int) and n_states == 1:
self.compute_eigendecomposition()
self.compute_macrostates(n_states=n_states, cluster_key=cluster_key, **kwargs)
return self
@d.dedent
def plot_coarse_T(
self,
show_stationary_dist: bool = True,
show_initial_dist: bool = False,
cmap: Union[str, ListedColormap] = "viridis",
xtick_rotation: float = 45,
annotate: bool = True,
show_cbar: bool = True,
title: Optional[str] = None,
figsize: Tuple[float, float] = (8, 8),
dpi: int = 80,
save: Optional[Union[Path, str]] = None,
text_kwargs: Mapping[str, Any] = MappingProxyType({}),
**kwargs: Any,
) -> None:
"""
Plot the coarse-grained transition matrix between macrostates.
Parameters
----------
show_stationary_dist
Whether to show :attr:`coarse_stationary_distribution`, if present.
show_initial_dist
Whether to show :attr:`coarse_initial_distribution`.
cmap
Colormap to use.
xtick_rotation
Rotation of ticks on the x-axis.
annotate
Whether to display the text on each cell.
show_cbar
Whether to show colorbar.
title
Title of the figure.
%(plotting)s
text_kwargs
Keyword arguments for :func:`matplotlib.pyplot.text`.
kwargs
Keyword arguments for :func:`matplotlib.pyplot.imshow`.
Returns
-------
%(just_plots)s
"""
def stylize_dist(
ax: Axes, data: np.ndarray, xticks_labels: Sequence[str] = ()
) -> None:
_ = ax.imshow(data, aspect="auto", cmap=cmap, norm=norm)
for spine in ax.spines.values():
spine.set_visible(False)
if xticks_labels is not None:
ax.set_xticks(np.arange(data.shape[1]))
ax.set_xticklabels(xticks_labels)
plt.setp(
ax.get_xticklabels(),
rotation=xtick_rotation,
ha="right",
rotation_mode="anchor",
)
else:
ax.set_xticks([])
ax.tick_params(
which="both", top=False, right=False, bottom=False, left=False
)
ax.set_yticks([])
def annotate_heatmap(im, valfmt: str = "{x:.2f}") -> None:
# modified from matplotlib's site
data = im.get_array()
kw = {"ha": "center", "va": "center"}
kw.update(**text_kwargs)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=_get_black_or_white(im.norm(data[i, j]), cmap))
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
def annotate_dist_ax(ax, data: np.ndarray, valfmt: str = "{x:.2f}"):
if ax is None:
return
if isinstance(valfmt, str):
valfmt = StrMethodFormatter(valfmt)
kw = {"ha": "center", "va": "center"}
kw.update(**text_kwargs)
for i, val in enumerate(data):
kw.update(color=_get_black_or_white(im.norm(val), cmap))
ax.text(
i,
0,
valfmt(val, None),
**kw,
)
coarse_T = self.coarse_T
coarse_init_d = self.coarse_initial_distribution
coarse_stat_d = self.coarse_stationary_distribution
if coarse_T is None:
raise RuntimeError(
"Compute coarse-grained transition matrix first as `.compute_macrostates()` with `n_states > 1`."
)
if show_stationary_dist and coarse_stat_d is None:
logg.warning("Coarse stationary distribution is `None`, ignoring")
show_stationary_dist = False
if show_initial_dist and coarse_init_d is None:
logg.warning("Coarse initial distribution is `None`, ignoring")
show_initial_dist = False
hrs, wrs = [1], [1]
if show_stationary_dist:
hrs += [0.05]
if show_initial_dist:
hrs += [0.05]
if show_cbar:
wrs += [0.025]
dont_show_dist = not show_initial_dist and not show_stationary_dist
fig = plt.figure(constrained_layout=False, figsize=figsize, dpi=dpi)
gs = plt.GridSpec(
1 + show_stationary_dist + show_initial_dist,
1 + show_cbar,
height_ratios=hrs,
width_ratios=wrs,
wspace=0.05,
hspace=0.05,
)
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
ax = fig.add_subplot(gs[0, 0])
cax = fig.add_subplot(gs[:1, -1]) if show_cbar else None
init_ax, stat_ax = None, None
labels = list(self.coarse_T.columns)
tmp = coarse_T
        if show_stationary_dist:
tmp = np.c_[tmp, coarse_stat_d]
if show_initial_dist:
tmp = np.c_[tmp, coarse_init_d]
minn, maxx = np.nanmin(tmp), np.nanmax(tmp)
norm = Normalize(vmin=minn, vmax=maxx)
if show_stationary_dist:
stat_ax = fig.add_subplot(gs[1, 0])
stylize_dist(
stat_ax,
np.array(coarse_stat_d).reshape(1, -1),
xticks_labels=labels if not show_initial_dist else None,
)
stat_ax.yaxis.set_label_position("right")
stat_ax.set_ylabel("stationary dist", rotation=0, ha="left", va="center")
if show_initial_dist:
init_ax = fig.add_subplot(gs[show_stationary_dist + show_initial_dist, 0])
stylize_dist(
init_ax, np.array(coarse_init_d).reshape(1, -1), xticks_labels=labels
)
init_ax.yaxis.set_label_position("right")
init_ax.set_ylabel("initial dist", rotation=0, ha="left", va="center")
im = ax.imshow(coarse_T, aspect="auto", cmap=cmap, norm=norm, **kwargs)
ax.set_title("coarse-grained transition matrix" if title is None else title)
if cax is not None:
_ = ColorbarBase(
cax,
cmap=cmap,
norm=norm,
ticks=np.linspace(minn, maxx, 10),
format="%0.3f",
)
ax.set_yticks(np.arange(coarse_T.shape[0]))
ax.set_yticklabels(labels)
ax.tick_params(
top=False,
bottom=dont_show_dist,
labeltop=False,
labelbottom=dont_show_dist,
)
for spine in ax.spines.values():
spine.set_visible(False)
if dont_show_dist:
ax.set_xticks(np.arange(coarse_T.shape[1]))
ax.set_xticklabels(labels)
plt.setp(
ax.get_xticklabels(),
rotation=xtick_rotation,
ha="right",
rotation_mode="anchor",
)
else:
ax.set_xticks([])
ax.set_yticks(np.arange(coarse_T.shape[0] + 1) - 0.5, minor=True)
ax.tick_params(which="minor", bottom=dont_show_dist, left=False, top=False)
if annotate:
annotate_heatmap(im)
if show_stationary_dist:
annotate_dist_ax(stat_ax, coarse_stat_d.values)
if show_initial_dist:
annotate_dist_ax(init_ax, coarse_init_d.values)
if save:
save_fig(fig, save)
@d.dedent
def plot_macrostate_composition(
self,
key: str,
width: float = 0.8,
title: Optional[str] = None,
labelrot: float = 45,
legend_loc: Optional[str] = "upper right out",
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
save: Optional[Union[str, Path]] = None,
show: bool = True,
) -> Optional[Axes]:
"""
Plot stacked histogram of macrostates over categorical annotations.
Parameters
----------
%(adata)s
key
Key from :attr:`anndata.AnnData.obs` containing categorical annotations.
width
Bar width in `[0, 1]`.
title
Title of the figure. If `None`, create one automatically.
labelrot
Rotation of labels on x-axis.
legend_loc
Position of the legend. If `None`, don't show legend.
%(plotting)s
show
If `False`, return :class:`matplotlib.pyplot.Axes`.
Returns
-------
The axes object, if ``show = False``.
%(just_plots)s
"""
from cellrank.pl._utils import _position_legend
macrostates = self.macrostates
if macrostates is None:
raise RuntimeError("Compute macrostates first as `.compute_macrostates()`.")
if key not in self.adata.obs:
raise KeyError(f"Data not found in `adata.obs[{key!r}]`.")
if not is_categorical_dtype(self.adata.obs[key]):
raise TypeError(
f"Expected `adata.obs[{key!r}]` to be `categorical`, "
f"found `{infer_dtype(self.adata.obs[key])}`."
)
mask = ~macrostates.isnull()
df = (
pd.DataFrame({"macrostates": macrostates, key: self.adata.obs[key]})[mask]
.groupby([key, "macrostates"])
.size()
)
try:
cats_colors = self.adata.uns[f"{key}_colors"]
except KeyError:
cats_colors = _create_categorical_colors(
len(self.adata.obs[key].cat.categories)
)
cat_color_mapper = dict(zip(self.adata.obs[key].cat.categories, cats_colors))
x_indices = np.arange(len(macrostates.cat.categories))
bottom = np.zeros_like(x_indices, dtype=np.float32)
width = min(1, max(0, width))
fig, ax = plt.subplots(figsize=figsize, dpi=dpi, tight_layout=True)
for cat, color in cat_color_mapper.items():
frequencies = df.loc[cat]
# do not add to legend if category is missing
if np.sum(frequencies) > 0:
ax.bar(
x_indices,
frequencies,
width,
label=cat,
color=color,
bottom=bottom,
ec="black",
lw=0.5,
)
bottom += np.array(frequencies)
ax.set_xticks(x_indices)
ax.set_xticklabels(
# assuming at least 1 category
frequencies.index,
rotation=labelrot,
ha="center" if labelrot in (0, 90) else "right",
)
y_max = bottom.max()
ax.set_ylim([0, y_max + 0.05 * y_max])
ax.set_yticks(np.linspace(0, y_max, 5))
ax.margins(0.05)
ax.set_xlabel("macrostate")
ax.set_ylabel("frequency")
if title is None:
title = f"distribution over {key}"
ax.set_title(title)
if legend_loc not in (None, "none"):
_position_legend(ax, legend_loc=legend_loc)
if save is not None:
save_fig(fig, save)
if not show:
return ax
def _n_states(self, n_states: Optional[Union[int, Sequence[int]]]) -> int:
if n_states is None:
if self.eigendecomposition is None:
raise RuntimeError(
"Compute eigendecomposition first as `.compute_eigendecomposition()` or "
"supply `n_states != None`."
)
return self.eigendecomposition["eigengap"] + 1
# fmt: off
if isinstance(n_states, int):
if n_states <= 0:
raise ValueError(f"Expected `n_states` to be positive, found `{n_states}`.")
return n_states
if self._gpcca is None:
raise RuntimeError("Compute Schur decomposition first as `.compute_schur()`.")
if not isinstance(n_states, Sequence):
raise TypeError(f"Expected `n_states` to be a `Sequence`, found `{type(n_states).__name__!r}`.")
if len(n_states) != 2:
raise ValueError(f"Expected `n_states` to be of size `2`, found `{len(n_states)}`.")
minn, maxx = sorted(n_states)
        if minn <= 1:
            logg.warning(f"Minimum value must be larger than `1`, found `{minn}`. Setting `min=2`")
            minn = 2
if minn == 2:
minn = 3
logg.warning(
f"In most cases, 2 clusters will always be optimal. "
f"If you really expect 2 clusters, use `n_states=2`. Setting `min={minn}`"
)
# fmt: on
maxx = max(minn + 1, maxx)
logg.info(f"Calculating minChi criterion in interval `[{minn}, {maxx}]`")
return int(np.arange(minn, maxx + 1)[np.argmax(self._gpcca.minChi(minn, maxx))])
def _create_states(
self,
probs: Union[np.ndarray, Lineage],
n_cells: int,
check_row_sums: bool = False,
return_not_enough_cells: bool = False,
) -> Union[pd.Series, Tuple[pd.Series, np.ndarray]]:
if n_cells <= 0:
raise ValueError(f"Expected `n_cells` to be positive, found `{n_cells}`.")
discrete, not_enough_cells = _fuzzy_to_discrete(
a_fuzzy=probs,
n_most_likely=n_cells,
remove_overlap=False,
raise_threshold=0.2,
check_row_sums=check_row_sums,
)
states = _series_from_one_hot_matrix(
membership=discrete,
index=self.adata.obs_names,
names=probs.names if isinstance(probs, Lineage) else None,
)
return (states, not_enough_cells) if return_not_enough_cells else states
def _validate_n_states(self, n_states: int) -> int:
if self._invalid_n_states is not None and n_states in self._invalid_n_states:
logg.warning(
f"Unable to compute macrostates with `n_states={n_states}` because it will "
f"split complex conjugate eigenvalues. Using `n_states={n_states + 1}`"
)
n_states += 1 # cannot force recomputation of the Schur decomposition
assert n_states not in self._invalid_n_states, "Sanity check failed."
return n_states
def _compute_one_macrostate(
self,
n_cells: Optional[int],
cluster_key: Optional[str],
) -> None:
start = logg.info("For 1 macrostate, stationary distribution is computed")
eig = self.eigendecomposition
if (
eig is not None
and "stationary_dist" in eig
and eig["params"]["which"] == "LR"
):
stationary_dist = eig["stationary_dist"]
else:
self.compute_eigendecomposition(only_evals=False, which="LR")
stationary_dist = self.eigendecomposition["stationary_dist"]
self._set_macrostates(
memberships=stationary_dist[:, None],
n_cells=n_cells,
cluster_key=cluster_key,
check_row_sums=False,
time=start,
)
@d.dedent
def _set_macrostates(
self,
memberships: np.ndarray,
n_cells: Optional[int] = 30,
cluster_key: str = "clusters",
check_row_sums: bool = True,
time: Optional[datetime] = None,
params: Dict[str, Any] = MappingProxyType({}),
) -> None:
"""
Map fuzzy clustering to pre-computed annotations to get names and colors.
Given the fuzzy clustering, we would like to select the most likely cells from each state and use these to
give each state a name and a color by comparing with pre-computed, categorical cluster annotations.
Parameters
----------
memberships
Fuzzy clustering.
%(n_cells)s
cluster_key
Key from :attr:`anndata.AnnData.obs` to get reference cluster annotations.
check_row_sums
Check whether rows in `memberships` sum to `1`.
time
Start time of macrostates computation.
params
Parameters used in macrostates computation.
Returns
-------
Nothing, just updates the field as described in :meth:`compute_macrostates`.
"""
if n_cells is None:
# fmt: off
logg.debug("Setting the macrostates using macrostate assignment")
assignment = pd.Series(np.argmax(memberships, axis=1).astype(str), dtype="category")
# sometimes, a category can be missing
assignment = assignment.cat.reorder_categories([str(i) for i in range(memberships.shape[1])])
not_enough_cells = []
# fmt: on
else:
logg.debug("Setting the macrostates using macrostates memberships")
# select the most likely cells from each macrostate
assignment, not_enough_cells = self._create_states(
memberships,
n_cells=n_cells,
check_row_sums=check_row_sums,
return_not_enough_cells=True,
)
# remove previous fields
self._write_terminal_states(None, None, None, None, log=False)
# fmt: off
assignment, colors = self._set_categorical_labels(assignment, cluster_key=cluster_key)
memberships = Lineage(memberships, names=list(assignment.cat.categories), colors=colors)
# fmt: on
groups = pd.DataFrame(assignment).groupby(0).size()
groups = groups[groups != n_cells].to_dict()
if len(groups):
logg.warning(
f"The following terminal states have different number "
f"of cells than requested ({n_cells}): {groups}"
)
self._write_macrostates(
assignment, colors, memberships, time=time, params=params
)
@logger
@shadow
def _write_macrostates(
self,
macrostates: pd.Series,
colors: np.ndarray,
memberships: Lineage,
params: Dict[str, Any] = MappingProxyType({}),
) -> str:
# fmt: off
names = list(macrostates.cat.categories)
key = Key.obs.macrostates(self.backward)
self._set("_macrostates", obj=self.adata.obs, key=key, value=macrostates, shadow_only=True)
ckey = Key.uns.colors(key)
self._set("_macrostates_colors", obj=self.adata.uns, key=ckey, value=colors, shadow_only=True)
mkey = Key.obsm.memberships(key)
self._set("_macrostates_memberships", obj=self.adata.obsm, key=mkey, value=memberships, shadow_only=True)
self.params[key] = dict(params)
if len(names) > 1:
# not using stationary distribution
g = self._gpcca
tmat = pd.DataFrame(g.coarse_grained_transition_matrix, index=names, columns=names)
            init_dist = pd.Series(g.coarse_grained_input_distribution, index=names)
import json
import os
import warnings
import random
import string
import csv
import time
import datetime
import io
import pandas as pd
from flask import (
Blueprint, flash, Flask, g, redirect, render_template, request, url_for, jsonify, Response
)
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField, BooleanField
from wtforms.validators import DataRequired, NumberRange, InputRequired
from wtforms.widgets import html5
from flask_wtf.csrf import CSRFProtect
from werkzeug.utils import secure_filename
from survey._app import app, csrf_protect
from survey.db import get_db, table_exists, insert, update
bp = Blueprint("admin", __name__)
###### helpers #####
class JobConfig(dict):
def __init__(self, job_id, api_key, nb_rows=0, unique_worker=True, base_code="", expected_judgments=0, payment_max_cents=0):
super().__init__()
self["job_id"] = job_id
self["api_key"] = api_key
self["nb_rows"] = nb_rows
self["unique_worker"] = unique_worker
self["base_code"] = base_code
self["expected_judgments"] = expected_judgments
self["payment_max_cents"] = payment_max_cents
self.__dict__ = self
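# Illustrative construction (values are made up); because __dict__ is bound to
# the mapping itself, attribute and key access are interchangeable:
#   cfg = JobConfig(job_id="job123", api_key="XYZ", base_code="abc")
#   cfg.api_key == cfg["api_key"]   # True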
def get_job_config(con, job_id, table="jobs"):
"""
Return a job config or the default configuration if the job wasn't found
:param con: (Connection)
:param job_id:
:param table:
"""
_job_config = None
if table_exists(con, table):
with con:
_job_config = con.execute(f"SELECT * from {table} WHERE job_id==?", (job_id,)).fetchone()
if _job_config is None:
job_config = JobConfig(job_id, api_key='')
else:
job_config = JobConfig(**_job_config)
return job_config
def update_job(con, job_id, job_config, table="jobs"):
if not table_exists(con, table):
insert(pd.DataFrame(data=[job_config]), table=table, con=con)
else:
with con:
check = con.execute(f"SELECT job_id from jobs where job_id==?", (job_id,)).fetchone()
if check:
update(
f"UPDATE {table} SET api_key=?, base_code=?, expected_judgments=?, payment_max_cents=?",
args=(job_config['api_key'], job_config['base_code'], job_config['expected_judgments'], job_config['payment_max_cents']),
con=con
)
else:
                insert(pd.DataFrame(data=[job_config]), table=table, con=con)
# What is different in this kernel:
# - data preprocessing was modularised and hopefully made more clear, as repetitative actions were moved into a separate function
# - LightGBM hyperparameters were taken from my another kernel, where they were tuned to the `application` data subset only:
# https://www.kaggle.com/mlisovyi/lightgbm-hyperparameter-optimisation-lb-0-746
# - Check out the feature importance plot. It is VERY different from any other kernel.
# It is most likely related to the regularisation in the model. This will have to be studied
#
# What was borrowed in this kernel:
# This script is a fork of the awesome kernel by olivier, that insiper a lot of kernels on this competition:
# https://www.kaggle.com/ogrellier/good-fun-with-ligthgbm
# It also uses memory-footprint-reduction technique copied over from this very clear and useful kernel:
# https://www.kaggle.com/gemartin/load-data-reduce-memory-usage
# The tiny add-on to store OOF predictions on the training dataset was taken from this kernel:
# https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, roc_curve, average_precision_score
from sklearn.model_selection import KFold, StratifiedKFold
from lightgbm import LGBMClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import gc
PATH='~/.kaggle/competitions/home-credit-default-risk/'
def reduce_mem_usage(df):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
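# Usage sketch (synthetic frame): integer/float columns are downcast to the
# smallest sufficient dtype and object columns become categoricals.
#   demo = pd.DataFrame({'a': np.arange(1000), 'b': ['x', 'y'] * 500})
#   demo = reduce_mem_usage(demo)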
def import_data(file):
"""create a dataframe and optimize its memory usage"""
df = pd.read_csv(file, parse_dates=True, keep_date_col=True)
df = reduce_mem_usage(df)
return df
def average_dummies(df, dummy_col, count_col, gb_col, preffix='', del_input=True):
print('DF shape : ', df.shape)
if dummy_col:
print('transform to dummies')
df = pd.concat([df, pd.get_dummies(df[dummy_col])], axis=1).drop(dummy_col, axis=1)
if count_col and gb_col:
print('Counting buros')
df_counts = df[[gb_col, count_col]].groupby(gb_col).count()
df[count_col] = df[gb_col].map(df_counts[count_col])
avg_df = None
if gb_col:
print('averaging ')
avg_df = df.groupby(gb_col).mean()
if preffix:
avg_df.columns = [preffix + f_ for f_ in avg_df.columns]
print(avg_df.head())
print(df.head())
if del_input:
print('Deleting input')
del df
gc.collect()
if avg_df is not None:
print(avg_df.head())
print(avg_df.columns.values)
return avg_df
elif not del_input:
return df
else:
return None
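# Usage sketch (mirrors the calls in build_model_input below): one-hot encode
# STATUS, count rows per SK_ID_BUREAU and average everything per bureau id
# with an 'avg_buro_' prefix:
#   avg = average_dummies(buro_bal, dummy_col='STATUS',
#                         count_col='MONTHS_BALANCE', gb_col='SK_ID_BUREAU',
#                         preffix='avg_buro_')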
def build_model_input():
print('Read Bureau_Balance')
buro_bal = import_data(PATH+'/bureau_balance.csv')
avg_buro_bal = average_dummies(buro_bal, dummy_col='STATUS', count_col='MONTHS_BALANCE', gb_col='SK_ID_BUREAU', preffix='avg_buro_')
print('Read Bureau')
buro_full = import_data(PATH+'/bureau.csv')
buro_full = average_dummies(buro_full, dummy_col=['CREDIT_ACTIVE', 'CREDIT_CURRENCY', 'CREDIT_TYPE'], count_col=None, gb_col=None, preffix=None, del_input=False)
print('Merge with buro avg')
buro_full = buro_full.merge(right=avg_buro_bal.reset_index(), how='left', on='SK_ID_BUREAU', suffixes=('', '_bur_bal'))
avg_buro = average_dummies(buro_full, dummy_col=None, count_col='SK_ID_BUREAU', gb_col='SK_ID_CURR', preffix='avg_buro_', del_input=True)
print('Read prev')
prev = import_data(PATH+'/previous_application.csv')
prev_cat_features = [ f_ for f_ in prev.columns if prev[f_].dtype == 'object' ]
avg_prev = average_dummies(prev, dummy_col=prev_cat_features, count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='prev_')
print('Reading POS_CASH')
pos = import_data(PATH+'/POS_CASH_balance.csv')
avg_pos = average_dummies(pos, dummy_col='NAME_CONTRACT_STATUS', count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='pos_')
print('Reading CC balance')
cc_bal = import_data(PATH+'/credit_card_balance.csv')
avg_cc_bal = average_dummies(cc_bal, dummy_col='NAME_CONTRACT_STATUS', count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='cc_bal_')
print('Reading Installments')
inst = import_data(PATH+'/installments_payments.csv')
avg_inst = average_dummies(inst, dummy_col=None, count_col='SK_ID_PREV', gb_col='SK_ID_CURR', preffix='inst_')
print('Read data and test')
data = import_data(PATH+'/application_train.csv')
test = import_data(PATH+'/application_test.csv')
print('Shapes : ', data.shape, test.shape)
y = data['TARGET']
del data['TARGET']
categorical_feats = [ f for f in data.columns if data[f].dtype == 'object' ]
categorical_feats
for f_ in categorical_feats:
        data[f_], indexer = pd.factorize(data[f_])
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import calendar
import ccdproc
import collections
import datetime
import glob
import logging
import math
import numpy as np
import os
import pandas
import random
import re
import subprocess
import sys
import time
from astropy.utils import iers
iers.Conf.iers_auto_url.set('ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all')
from astroplan import Observer
from astropy import units as u
from astropy.io import fits
from astropy.convolution import convolve, Gaussian1DKernel, Box1DKernel
from astropy.coordinates import EarthLocation
from astropy.modeling import (models, fitting, Model)
from astropy.stats import sigma_clip
from astropy.time import Time
from astroscrappy import detect_cosmics
from matplotlib import pyplot as plt
from scipy import signal, interpolate
from threading import Timer
from . import check_version
__version__ = __import__('goodman_pipeline').__version__
log = logging.getLogger(__name__)
def astroscrappy_lacosmic(ccd, red_path=None, save_mask=False):
mask, ccd.data = detect_cosmics(ccd.data)
ccd.header['GSP_COSM'] = ('LACosmic',
"Cosmic ray rejection method")
log.info("Cosmic rays rejected using astroscrappy's lacosmic")
if save_mask and red_path is not None:
mask_ccd = ccd.copy()
mask_ccd.mask = mask
new_file_name = 'crmask_' + mask_ccd.header['GSP_FNAM']
mask_ccd.header['GSP_FNAM'] = new_file_name
log.info("Saving binary mask of cosmic rays to "
"{:s}".format(new_file_name))
write_fits(ccd=mask_ccd,
full_path=os.path.join(red_path, new_file_name))
return ccd
def add_wcs_keys(ccd):
"""Adds generic keyword for linear wavelength solution to the header
Linear wavelength solutions require a set of standard fits keywords. Later
on they will be updated accordingly.
The main goal of putting them here is to have consistent and nicely ordered
headers.
Notes:
This does NOT add a WCS solution, just the keywords.
Args:
ccd (CCDData) A :class:~astropy.nddata.CCDData` instance with no wcs
keywords.
Returns:
ccd (CCDData) A :class:`~astropy.nddata.CCDData` instance with modified
header with added WCS keywords
"""
log.debug("Adding FITS LINEAR wcs keywords to header.")
ccd.header.set('BANDID1',
value='spectrum - background none, weights none, '
'clean no',
comment='')
ccd.header.set('APNUM1',
value='1 1 0 0',
comment='')
ccd.header.set('WCSDIM',
value=1,
comment='')
ccd.header.set('CTYPE1',
value='LINEAR',
comment='')
ccd.header.set('CRVAL1',
value=1,
comment='')
ccd.header.set('CRPIX1',
value=1,
comment='')
ccd.header.set('CDELT1',
value=1,
comment='')
ccd.header.set('CD1_1',
value=1,
comment='')
ccd.header.set('LTM1_1',
value=1,
comment='')
ccd.header.set('WAT0_001',
value='system=equispec',
comment='')
ccd.header.set('WAT1_001',
value='wtype=linear label=Wavelength units=angstroms',
comment='')
ccd.header.set('DC-FLAG',
value=0,
comment='')
ccd.header.set('DCLOG1',
value='REFSPEC1 = non set',
comment='')
return ccd
def add_linear_wavelength_solution(ccd, x_axis, reference_lamp, crpix=1):
"""Add wavelength solution to the new FITS header
Defines FITS header keyword values that will represent the wavelength
solution in the header so that the image can be read in any other
astronomical tool. (e.g. IRAF)
Args:
ccd (CCDData) Instance of :class:`~astropy.nddata.CCDData`
x_axis (ndarray): Linearized x-axis in angstrom
reference_lamp (str): Name of lamp used to get wavelength solution.
crpix (int): reference pixel for defining wavelength solution.
Default 1. For most cases 1 should be fine.
Returns:
ccd (CCDData) A :class:`~astropy.nddata.CCDData` instance with
linear wavelength solution on it.
"""
assert crpix > 0
new_crpix = crpix
new_crval = x_axis[new_crpix - crpix]
new_cdelt = x_axis[new_crpix] - x_axis[new_crpix - crpix]
ccd.header.set('BANDID1', 'spectrum - background none, weights none, '
'clean no')
ccd.header.set('WCSDIM', 1)
ccd.header.set('CTYPE1', 'LINEAR ')
ccd.header.set('CRVAL1', new_crval)
ccd.header.set('CRPIX1', new_crpix)
ccd.header.set('CDELT1', new_cdelt)
ccd.header.set('CD1_1', new_cdelt)
ccd.header.set('LTM1_1', 1.)
ccd.header.set('WAT0_001', 'system=equispec')
ccd.header.set('WAT1_001', 'wtype=linear label=Wavelength units=angstroms')
ccd.header.set('DC-FLAG', 0)
ccd.header.set('DCLOG1', 'REFSPEC1 = {:s}'.format(reference_lamp))
return ccd
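# The keywords set above encode the usual linear dispersion relation
# wavelength(i) = CRVAL1 + (i - CRPIX1) * CDELT1 for pixel index i.
# Illustrative numbers: CRVAL1=3500, CRPIX1=1, CDELT1=2 map pixel 101 to 3700 A.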
def bias_subtract(ccd, master_bias, master_bias_name):
"""Subtract bias from file.
Wrapper for :func:`~ccdproc.subtract_bias`. The main goal is to have a
consistent API for apps using the Goodman Pipeline as a library.
Args:
ccd (CCDData): A file to be bias-subtracted
master_bias (CCDData):
master_bias_name (str): Full path to master bias file, this is added to
the bias-subtracted ccd under `GSP_BIAS`.
Returns:
A bias-subtracted file.
"""
ccd = ccdproc.subtract_bias(ccd=ccd, master=master_bias, add_keyword=False)
log.info("Bias subtracted")
ccd.header.set('GSP_BIAS',
value=os.path.basename(master_bias_name),
comment="Master Bias Image")
return ccd
def bin_reference_data(wavelength, intensity, serial_binning):
"""Bins a 1D array
This method reduces the size of an unbinned array by binning.
The function to combine data is `numpy.mean`.
Args:
wavelength (array): Wavelength axis
intensity (array): Intensity
serial_binning (int): Serial Binning is the binning in the
dispersion axis.
Returns:
Binned wavelength and intensity arrays.
"""
if serial_binning != 1:
b_wavelength = ccdproc.block_reduce(wavelength,
serial_binning,
np.mean)
b_intensity = ccdproc.block_reduce(intensity,
serial_binning,
np.mean)
return b_wavelength, b_intensity
else:
return wavelength, intensity
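# Usage sketch (synthetic arrays): with serial_binning=2 each pair of samples
# is averaged, halving the array length.
#   w = np.linspace(3000, 7000, 4096)
#   f = np.random.random(4096)
#   bw, bf = bin_reference_data(w, f, serial_binning=2)   # len(bw) == 2048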
def call_cosmic_rejection(ccd,
image_name,
out_prefix,
red_path,
keep_files=False,
prefix='c',
method='dcr',
save=False):
"""Call for the appropriate cosmic ray rejection method
There are four options when dealing with cosmic ray rejection in this
pipeline, The default option is called ``default`` and it will choose the
rejection method based on the binning of the image. Note that there are only
two *real* methods: ``dcr`` and ``lacosmic``.
For ``binning 1x1`` the choice will be ``dcr`` for ``binning 2x2`` and
``3x3`` will be ``lacosmic``.
The method ``dcr`` is a program written in C by <NAME> (http://users.camk.edu.pl/pych/DCR/) that works very well for
spectroscopy the only negative aspect is that integration with python was
difficult and not natively (through subprocess).
The method `lacosmic` is well known but there are different implementations,
we started using :func:`~ccdproc.cosmicray_lacosmic` but later we shifted
towards ``astroscrappy.detect_cosmics``. The LACosmic method was developed
by <NAME>. See <http://www.astro.yale.edu/dokkum/lacosmic/>
There is also the option of skipping cosmic ray removal by using ``none``.
Args:
ccd (CCCData): a :class:`~astropy.nddata.CCDData` instance.
image_name (str): Science image name.
out_prefix (str): Partial prefix to be added to the image name. Related
to previous processes and not cosmic ray rejection.
red_path (str): Path to reduced data directory.
keep_files (bool): If True, the original file and the cosmic ray mask
will not be deleted. Default is False.
prefix (str): Cosmic ray rejection related prefix to be added to image
name.
method (str): Method to use for cosmic ray rejection. There are four
options: `default`, `dcr`, `lacosmic` and `none`.
save (bool): Disables by default saving the images
Returns:
:class:`~astropy.nddata.CCDData` instance and `out_prefix` which is the
prefix added to the image name.
Raises:
NotImplementedError if the `method` argument is neither `dcr`, `lacosmic`
nor `none`.
"""
log.debug("Cosmic ray rejection method from input is '{:s}'".format(method))
binning, _ = [int(i) for i in ccd.header['CCDSUM'].split()]
if method == 'default':
if binning == 1:
method = 'dcr'
log.info('Setting cosmic ray rejection method to:'
' {:s}'.format(method))
elif binning == 2:
method = 'lacosmic'
log.info('Setting cosmic ray rejection method to:'
' {:s}'.format(method))
elif binning == 3:
method = 'lacosmic'
log.info('Setting cosmic ray rejection method to:'
' {:s}'.format(method))
if ccd.header['OBSTYPE'] == 'COMP' and method != 'none':
log.info("Changing cosmic ray rejection method from '{:s}' to 'none'"
" for comparison lamp. Prefix 'c' will be added "
"anyway.".format(method))
method = 'none'
log.debug("Cosmic ray rejection changed to 'none' for this file: "
"{:s}".format(ccd.header['GSP_FNAM']))
out_prefix = prefix + out_prefix
if method == 'dcr':
log.warning('DCR applies the correction directly to the image. If you '
'want to keep the cosmic ray mask use --keep-cosmic-files')
if not os.path.isfile(os.path.join(red_path, 'dcr.par')):
_create = GenerateDcrParFile()
_instrument = ccd.header['INSTCONF']
_binning, _ = ccd.header['CCDSUM'].split()
_create(instrument=_instrument, binning=_binning, path=red_path)
full_path = os.path.join(red_path, f"{out_prefix}_{image_name}")
ccd.header.set('GSP_COSM',
value="DCR",
comment="Cosmic ray rejection method")
write_fits(ccd=ccd, full_path=full_path)
log.info('Saving image: {:s}'.format(full_path))
in_file = f"{out_prefix}_{image_name}"
# This is to return the prefix that will be used by dcr
# Not to be used by dcr_cosmicray_rejection
out_prefix = prefix + out_prefix
ccd = dcr_cosmicray_rejection(data_path=red_path,
in_file=in_file,
prefix=prefix,
keep_cosmic_files=keep_files,
save=save)
return ccd, out_prefix
elif method == 'lacosmic':
ccd = astroscrappy_lacosmic(ccd=ccd,
red_path=red_path,
save_mask=keep_files)
out_prefix = prefix + out_prefix
full_path = os.path.join(red_path, f"{out_prefix}_{image_name}")
if save:
log.info('Saving image: {:s}'.format(full_path))
write_fits(ccd=ccd, full_path=full_path)
return ccd, out_prefix
elif method == 'none':
full_path = os.path.join(red_path, f"{out_prefix}_{image_name}")
if save:
log.info('Saving image: {:s}'.format(full_path))
write_fits(ccd=ccd, full_path=full_path)
return ccd, out_prefix
else:
log.error('Unrecognized Cosmic Method {:s}'.format(method))
raise NotImplementedError
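# Illustrative call to `call_cosmic_rejection` (sketch; the image name, prefix
# and path are hypothetical). With method='default' the actual method is
# selected from the CCDSUM value of the image as described above:
#
#     ccd, out_prefix = call_cosmic_rejection(ccd=ccd,
#                                             image_name='0001_science.fits',
#                                             out_prefix='fzt',
#                                             red_path='/data/red',
#                                             keep_files=False,
#                                             prefix='c',
#                                             method='default',
#                                             save=True)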
def create_master_bias(bias_files,
raw_data,
reduced_data,
technique):
"""Create Master Bias
Given a list of compatible bias file names, this function creates the
master bias using :func:`ccdproc.combine` with median combination
and 3-sigma clipping.
Args:
bias_files (list): List of all bias files to be combined. They have
to be compatible with each other as no check is done in this method.
raw_data (str): Full path to raw data location.
reduced_data (str): Full path to were reduced data will reside.
technique (str): Name of observing technique. Imaging or
Spectroscopy.
Returns:
master_bias (CCDData): The combined master bias as a
:class:`~astropy.nddata.CCDData` instance.
master_bias_name (str): Name of the file where the master bias was saved.
"""
assert isinstance(bias_files, list)
master_bias_list = []
log.info('Creating master bias')
for image_file in bias_files:
image_full_path = os.path.join(raw_data, image_file)
ccd = read_fits(image_full_path, technique=technique)
log.debug('Loading bias image: ' + image_full_path)
master_bias_list.append(ccd)
# combine bias for spectroscopy
log.info("Combining {} images to create master bias".format(
len(master_bias_list)))
master_bias = ccdproc.combine(master_bias_list,
method='median',
sigma_clip=True,
sigma_clip_low_thresh=3.0,
sigma_clip_high_thresh=3.0,
add_keyword=False)
# add name of images used to create master bias
for n in range(len(bias_files)):
master_bias.header['GSP_IC{:02d}'.format(n + 1)] = (
bias_files[n],
'Image used to create master bias')
master_bias_name = "master_bias_{}_{}_{}_R{:05.2f}_G{:05.2f}.fits".format(
master_bias.header['INSTCONF'].upper(),
technique[0:2].upper(),
"x".join(master_bias.header['CCDSUM'].split()),
float(master_bias.header['RDNOISE']),
float(master_bias.header['GAIN'])
)
write_fits(ccd=master_bias,
full_path=os.path.join(reduced_data, master_bias_name),
combined=True,
overwrite=True)
log.info('Created master bias: ' + master_bias_name)
return master_bias, master_bias_name
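# Illustrative usage of `create_master_bias` (sketch; file names and paths are
# hypothetical):
#
#     master_bias, master_bias_name = create_master_bias(
#         bias_files=['0001_bias.fits', '0002_bias.fits', '0003_bias.fits'],
#         raw_data='/data/raw',
#         reduced_data='/data/red',
#         technique='Spectroscopy')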
def create_master_flats(flat_files,
raw_data,
reduced_data,
technique,
overscan_region,
trim_section,
master_bias_name,
new_master_flat_name,
saturation_threshold,
ignore_bias=False):
"""Creates master flats
Using a list of compatible flat images it combines them using median
combination and 1-sigma clipping. It also applies all previous standard
calibrations to each image.
Args:
flat_files (list): List of files previously filtered. There is no
compatibility check in this function; the files are assumed to be
combinable.
raw_data (str): Full path to raw data.
reduced_data (str): Full path to reduced data. Where reduced data
should be stored.
technique (str): Observing technique. Imaging or Spectroscopy.
overscan_region (str): Defines the area to be used to estimate the
overscan level for overscan correction. Should be in the format
`[x1:x2,y1:y2]`.
trim_section (str): Defines the area to keep after trimming
unusable parts (edges). In the format `[x1:x2,y1:y2]`.
master_bias_name (str): Master bias name, can be a full path or not.
If it is a relative path, the path part is ignored and the full
path is built as `reduced_data` + `basename`.
new_master_flat_name (str): Name of the file to save the new master
flat. Can be an absolute path or not.
saturation_threshold (int): Saturation threshold, defines the percentage
of pixels above the saturation level allowed for flat field images.
ignore_bias (bool): Flag to create master flats without bias subtraction.
Returns:
The master flat :class:`~astropy.nddata.CCDData` instance and the
name under which the master flat was stored. If it can't build
the master flat it will return None, None.
"""
cleaned_flat_list = []
master_flat_list = []
if os.path.isabs(os.path.dirname(new_master_flat_name)):
master_flat_name = new_master_flat_name
else:
master_flat_name = os.path.join(
reduced_data, os.path.basename(new_master_flat_name))
if not ignore_bias:
if os.path.isabs(os.path.dirname(master_bias_name)) and \
os.path.exists(master_bias_name):
master_bias = read_fits(master_bias_name, technique=technique)
else:
master_bias_name = os.path.join(reduced_data,
os.path.basename(master_bias_name))
master_bias = read_fits(master_bias_name, technique=technique)
master_bias = image_trim(ccd=master_bias,
trim_section=trim_section,
trim_type='trimsec')
log.info('Creating Master Flat')
for flat_file in flat_files:
if os.path.isabs(flat_file):
image_full_path = flat_file
else:
image_full_path = os.path.join(raw_data, flat_file)
log.debug('Loading flat image: ' + image_full_path)
ccd = read_fits(image_full_path, technique=technique)
if ignore_bias and technique == 'Spectroscopy':
ccd = image_overscan(ccd, overscan_region=overscan_region)
ccd = image_trim(ccd=ccd,
trim_section=trim_section,
trim_type='trimsec')
if not ignore_bias:
ccd = ccdproc.subtract_bias(ccd,
master_bias,
add_keyword=False)
ccd.header['GSP_BIAS'] = (
os.path.basename(master_bias_name),
'Master bias image')
else:
log.warning('Ignoring bias on request')
if is_file_saturated(ccd=ccd,
threshold=saturation_threshold):
log.warning('Removing saturated image {:s}. '
'Use --saturation_threshold to change saturation_threshold '
'level'.format(flat_file))
continue
else:
cleaned_flat_list.append(flat_file)
master_flat_list.append(ccd)
if master_flat_list != []:
log.info("Combining {} images to create master flat".format(
len(master_flat_list)))
master_flat = ccdproc.combine(master_flat_list,
method='median',
sigma_clip=True,
sigma_clip_low_thresh=1.0,
sigma_clip_high_thresh=1.0,
add_keyword=False)
# add name of images used to create master bias
for n in range(len(cleaned_flat_list)):
master_flat.header['GSP_IC{:02d}'.format(n + 1)] = (
cleaned_flat_list[n],
'Image used to create master flat')
write_fits(ccd=master_flat,
full_path=master_flat_name,
combined=True)
log.info('Created Master Flat: ' + master_flat_name)
return master_flat, master_flat_name
else:
log.error('Empty flat list. Check that they do not exceed the '
'saturation_threshold limit.')
return None, None
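# Illustrative usage of `create_master_flats` (sketch; all names and regions
# below are hypothetical placeholders):
#
#     master_flat, master_flat_name = create_master_flats(
#         flat_files=['0010_flat.fits', '0011_flat.fits'],
#         raw_data='/data/raw',
#         reduced_data='/data/red',
#         technique='Spectroscopy',
#         overscan_region='[1:16,1:1896]',
#         trim_section='[51:4110,2:1896]',
#         master_bias_name='master_bias.fits',
#         new_master_flat_name='master_flat_400M1.fits',
#         saturation_threshold=1,
#         ignore_bias=False)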
def cross_correlation(reference,
compared,
slit_size,
serial_binning,
mode='full',
plot=False):
"""Do cross correlation of two 1D spectra
It convolves the reference lamp according to the slit size of the comparison
lamp being matched.
If the slit is larger than 3 arcseconds the reference lamp is convolved with
a `~astropy.convolution.Box1DKernel` because spectral lines look more like a
block than a line. If it is smaller than or equal to 3 arcseconds it
uses a `~astropy.convolution.Gaussian1DKernel` whose width is scaled by the
binning.
All reference lamps are unbinned, i.e. binning is 1x1.
Args:
reference (array): Reference array.
compared (array): Array to be matched. A new reference lamp.
slit_size (float): Slit width in arcseconds
serial_binning (int): Binning in the spectral axis
mode (str): Correlation mode for `signal.correlate`.
plot (bool): Switch debugging plots on or off.
Returns:
correlation_value (float): Shift value in pixels.
"""
cyaxis2 = compared
if slit_size > 3:
box_width = slit_size / (0.15 * serial_binning)
log.debug('BOX WIDTH: {:f}'.format(box_width))
box_kernel = Box1DKernel(width=box_width)
max_before = np.max(reference)
cyaxis1 = convolve(reference, box_kernel)
max_after = np.max(cyaxis1)
cyaxis1 *= max_before / max_after
else:
kernel_stddev = slit_size / (0.15 * serial_binning)
gaussian_kernel = Gaussian1DKernel(stddev=kernel_stddev)
cyaxis1 = convolve(reference, gaussian_kernel)
cyaxis2 = convolve(compared, gaussian_kernel)
ccorr = signal.correlate(cyaxis1, cyaxis2, mode=mode)
max_index = np.argmax(ccorr)
x_ccorr = np.linspace(-int(len(ccorr) / 2.),
int(len(ccorr) / 2.),
len(ccorr))
correlation_value = x_ccorr[max_index]
if plot: # pragma: no cover
plt.ion()
plt.title('Cross Correlation')
plt.xlabel('Lag Value')
plt.ylabel('Correlation Value')
plt.plot(x_ccorr, ccorr)
plt.draw()
plt.pause(2)
plt.clf()
plt.ioff()
return correlation_value
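# Illustrative usage of `cross_correlation` (sketch; `reference_lamp` and
# `new_lamp` stand for previously loaded CCDData instances):
#
#     pixel_shift = cross_correlation(reference=reference_lamp.data,
#                                     compared=new_lamp.data,
#                                     slit_size=1.0,
#                                     serial_binning=1)
#     # `pixel_shift` is the lag that best aligns `compared` with `reference`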
def classify_spectroscopic_data(path, search_pattern):
"""Classify data by grouping them by a set of keywords.
This function uses :class:`~ccdproc.ImageFileCollection`. First it creates a
collection of information regarding the images located in ``path`` that
match the pattern ``search_pattern``. The information obtained are all
keywords listed in the list ``keywords``.
The :class:`~ccdproc.ImageFileCollection` object is translated into
:class:`~pandas.DataFrame` and then is used much like an SQL database to
select and filter values and in that way put them in groups that are
:class:`~pandas.DataFrame` instances.
The keywords retrieved are:
- ``date``
- ``slit``
- ``date-obs``
- ``obstype``
- ``object``
- ``exptime``
- ``obsra``
- ``obsdec``
- ``grating``
- ``cam_targ``
- ``grt_targ``
- ``filter``
- ``filter2``
- ``gain``
- ``rdnoise``.
Then all data is grouped by matching the following keywords:
- ``slit``
- ``radeg``
- ``decdeg``
- ``grating``
- ``cam_targ``
- ``grt_targ``
- ``filter``
- ``filter2``
- ``gain``
- ``rdnoise``
And finally, every group is classified as: a *comparison lamp-only* group,
an *object-only* group or a *group of object and comparison lamps*. The
comparison lamps present in the last group (``COMP`` + ``OBJECT``) are also
added in the first one (``COMP``-only).
Args:
path (str): Path to data location
search_pattern (str): Prefix to match files.
Returns:
Instance of :class:`goodman_pipeline.core.core.NightDataContainer`
"""
log.debug("Spectroscopic Data Classification")
search_path = os.path.join(path, search_pattern + '*.fits')
file_list = glob.glob(search_path)
if file_list == []:
log.error('No file found using search pattern '
'"{:s}"'.format(search_pattern))
sys.exit('Please use the argument --search-pattern to define the '
'common prefix for the files to be processed.')
data_container = NightDataContainer(path=path,
instrument=str('Red'),
technique=str('Spectroscopy'))
keywords = ['date',
'slit',
'date-obs',
'obstype',
'object',
'exptime',
'obsra',
'obsdec',
'grating',
'cam_targ',
'grt_targ',
'filter',
'filter2',
'gain',
'rdnoise',
'lamp_hga',
'lamp_ne',
'lamp_ar',
'lamp_fe',
'lamp_cu'
]
ifc = ccdproc.ImageFileCollection(path, keywords=keywords, filenames=file_list)
pifc = ifc.summary.to_pandas()
pifc['radeg'] = ''
pifc['decdeg'] = ''
for i in pifc.index.tolist():
radeg, decdeg = ra_dec_to_deg(pifc.obsra.iloc[i], pifc.obsdec.iloc[i])
pifc.iloc[i, pifc.columns.get_loc('radeg')] = '{:.2f}'.format(radeg)
pifc.iloc[i, pifc.columns.get_loc('decdeg')] = '{:.2f}'.format(decdeg)
# now we can compare using degrees
confs = pifc.groupby(['slit',
'radeg',
'decdeg',
'grating',
'cam_targ',
'grt_targ',
'filter',
'filter2',
'gain',
'rdnoise']).size().reset_index().rename(
columns={0: 'count'})
for i in confs.index:
spec_group = pifc[((pifc['slit'] == confs.iloc[i]['slit']) &
(pifc['radeg'] == confs.iloc[i]['radeg']) &
(pifc['decdeg'] == confs.iloc[i]['decdeg']) &
(pifc['grating'] == confs.iloc[i]['grating']) &
(pifc['cam_targ'] == confs.iloc[i]['cam_targ']) &
(pifc['grt_targ'] == confs.iloc[i]['grt_targ']) &
(pifc['filter'] == confs.iloc[i]['filter']) &
(pifc['filter2'] == confs.iloc[i]['filter2']) &
(pifc['gain'] == confs.iloc[i]['gain']) &
(pifc['rdnoise'] == confs.iloc[i]['rdnoise']))]
group_obstype = spec_group.obstype.unique()
if any([value in ['COMP', 'ARC'] for value in group_obstype]) and \
len(group_obstype) == 1:
log.debug('Adding COMP group')
data_container.add_comp_group(comp_group=spec_group)
elif any([value in ['OBJECT', 'SPECTRUM'] for value in group_obstype]) \
and len(group_obstype) == 1:
log.debug('Adding OBJECT group')
data_container.add_object_group(object_group=spec_group)
else:
log.debug('Adding OBJECT-COMP group')
data_container.add_spec_group(spec_group=spec_group)
return data_container
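# Illustrative usage of `classify_spectroscopic_data` (sketch; the path and
# search pattern are hypothetical):
#
#     data_container = classify_spectroscopic_data(path='/data/red',
#                                                  search_pattern='cfzst')
#     # `data_container` is a NightDataContainer holding COMP-only,
#     # OBJECT-only and OBJECT+COMP groups as described above.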
def combine_data(image_list, dest_path, prefix=None, output_name=None,
method="median",
save=False):
"""Combine a list of :class:`~astropy.nddata.CCDData` instances.
Args:
image_list (list): Each element should be an instance of
:class:`~astropy.nddata.CCDData`
dest_path (str): Path to where the new image should be saved
prefix (str): Prefix to add to the image file name
output_name (str): Alternatively a file name can be passed; this will
ignore `prefix`.
method (str): Method for doing the combination, this goes straight to
the call of `ccdproc.combine` function.
save (bool): If True will save the combined images. If False it will
ignore `prefix` or `output_name`.
Returns:
A combined image as a :class:`~astropy.nddata.CCDData` object.
"""
assert len(image_list) > 1
combined_full_path = os.path.join(dest_path, 'combined_file.fits')
if output_name is not None:
combined_full_path = os.path.join(dest_path, output_name)
elif prefix is not None:
sample_image_name = random.choice(image_list).header['GSP_FNAM']
splitted_name = sample_image_name.split('_')
splitted_name[0] = re.sub('_', '', prefix)
splitted_name[1] = 'comb'
splitted_name[-1] = re.sub('.fits', '', splitted_name[-1])
combined_base_name = "_".join(splitted_name)
number = len(glob.glob(
os.path.join(dest_path,
combined_base_name + "*.fits")))
combined_full_path = os.path.join(
dest_path,
combined_base_name + "_{:03d}.fits".format(number + 1))
# combine image
combined_image = ccdproc.combine(image_list,
method=method,
sigma_clip=True,
sigma_clip_low_thresh=1.0,
sigma_clip_high_thresh=1.0,
add_keyword=False)
# add name of files used in the combination process
for i in range(len(image_list)):
image_name = image_list[i].header['GSP_FNAM']
new_image_name = '_' + image_name
if os.path.isfile(os.path.join(dest_path, image_name)):
write_fits(image_list[i],
full_path=os.path.join(dest_path,
new_image_name))
log.info("Deleting file {}".format(image_name))
os.unlink(os.path.join(dest_path, image_name))
else:
log.error("File {} does not exists".format(
os.path.join(dest_path,
image_name)))
combined_image.header.set("GSP_IC{:02d}".format(i + 1),
value=new_image_name,
comment='Image used to create combined')
if save:
write_fits(combined_image,
full_path=combined_full_path,
combined=True)
log.info("Saved combined file to {}".format(combined_full_path))
return combined_image
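# Illustrative usage of `combine_data` (sketch; `ccd_list` stands for a list
# of CCDData instances already processed by the pipeline and written to
# `dest_path`):
#
#     combined = combine_data(image_list=ccd_list,
#                             dest_path='/data/red',
#                             prefix='co',
#                             method='median',
#                             save=True)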
def convert_time(in_time):
"""Converts time to seconds since epoch
Args:
in_time (str): time obtained from header's keyword DATE-OBS
Returns:
time in seconds since epoch
"""
return calendar.timegm(time.strptime(in_time, "%Y-%m-%dT%H:%M:%S.%f"))
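# Illustrative usage of `convert_time` (sketch; the timestamp is arbitrary but
# follows the DATE-OBS format expected above):
#
#     seconds = convert_time('2020-01-01T03:45:12.345')
#     # returns the integer number of seconds since the epoch; the fractional
#     # part of the seconds is discarded by `time.strptime`'s struct_time.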
def dcr_cosmicray_rejection(data_path, in_file, prefix,
keep_cosmic_files=False, save=True):
"""Runs an external code for cosmic ray rejection
DCR was created by <NAME> and the code can be obtained from
http://users.camk.edu.pl/pych/DCR/ and is written in C. Contrary to
ccdproc's LACosmic it actually applies the correction, and also doesn't
update the mask attribute since it doesn't work with :class:`~astropy.nddata.CCDData` instances.
The binary takes three positional arguments: 1. input image,
2. output image and 3. cosmic rays image. It also requires a dcr.par file
located in the working directory. All this is handled by this function; if
`keep_cosmic_files` is False it will remove the original image and the
cosmic rays image. The removal of the original image is safe when used in
the context of the Goodman Pipeline, however if you want to use it
somewhere else, be careful.
Notes:
This function operates an external code therefore it doesn't return
anything natively, instead it creates a new image. A workaround has been
created that loads the new image and deletes the file.
Args:
data_path (str): Data location
in_file (str): Name of the file to have its cosmic rays removed
prefix (str): Prefix to add to the file with the cosmic rays removed
keep_cosmic_files (bool): If True the input and cosmic ray files are
kept, otherwise they are deleted.
save (bool): Toggles the option of saving the image.
"""
log.info('Removing cosmic rays using DCR by <NAME>')
log.debug('See http://users.camk.edu.pl/pych/DCR/')
# add the prefix for the output file
out_file = prefix + in_file
# define the name for the cosmic rays file
cosmic_file = 'cosmic_' + '_'.join(in_file.split('_')[1:])
# define full path for all the files involved
full_path_in = os.path.join(data_path, in_file)
full_path_out = os.path.join(data_path, out_file)
full_path_cosmic = os.path.join(data_path, cosmic_file)
# this is the command for running dcr, all arguments are required
command = 'dcr {:s} {:s} {:s}'.format(full_path_in,
full_path_out,
full_path_cosmic)
log.debug('DCR command:')
log.debug(command)
# get the current working directory to go back to it later in case the
# the pipeline has not been called from the same data directory.
cwd = os.getcwd()
# move to the directory were the data is, dcr is expecting a file dcr.par
os.chdir(data_path)
# call dcr
try:
dcr = subprocess.Popen(command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as error:
log.error(error)
sys.exit('Your system can not locate the executable file dcr, try '
'moving it to /bin or create a symbolic link\n\n\tcd /bin\n\t'
'sudo ln -s /full/path/to/dcr')
# return False
# if the process is taking too long to respond, kill it
# kill_process = lambda process: process.kill()
def kill_process(process): # pragma: no cover
log.error("DCR Timed out")
process.kill()
dcr_timer = Timer(10, kill_process, [dcr])
try:
dcr_timer.start()
stdout, stderr = dcr.communicate()
finally:
dcr_timer.cancel()
# wait for dcr to terminate
# dcr.wait()
# go back to the original directory. Could be the same.
os.chdir(cwd)
# If no error stderr is an empty string
if stderr != b'':
log.error(stderr)
if b'dcr: not found' in stderr:
sys.exit('Your system can not locate the executable file dcr, try '
'moving it to /bin or create a symbolic link\n\n\tcd '
'/bin\n\tsudo ln -s /full/path/to/dcr')
elif b'ERROR' in stdout:
for output_line in stdout.split(b'\n'):
log.error(output_line.decode("utf-8"))
else:
for output_line in stdout.split(b'\n'):
log.debug(output_line)
# delete extra files only if the execution ended without error
if not keep_cosmic_files and stderr == b'' and b'USAGE:' not in stdout \
and b'ERROR! calc_submean() failed' not in stdout:
try:
log.warning('Removing input file: {:s}'.format(full_path_in))
os.unlink(full_path_in)
except OSError as error:
log.error(error)
try:
log.warning(
'Removing cosmic rays file: {:s}'.format(full_path_cosmic))
os.unlink(full_path_cosmic)
except OSError as error:
log.error(error)
# recovers the saved file and returns the :class:`~astropy.nddata.CCDData`
# instance
if os.path.isfile(full_path_out):
ccd = ccdproc.CCDData.read(full_path_out, unit=u.adu)
if not save:
log.warning("Removing file because the attribute 'save' "
"is set to False")
os.unlink(full_path_out)
return ccd
def define_trim_section(sample_image, technique):
"""Get the initial trim section
The initial trim section is usually defined in the header with the
keyword ``TRIMSEC`` but in the case of Goodman HTS this does not work well,
in particular for spectroscopy, where combined binning and other settings
are more likely.
Args:
sample_image (str): Full path to sample image.
technique (str): The name of the technique, the options are:
Imaging or Spectroscopy.
Returns:
The trim section in the format ``[x1:x2, y1:y2]``
"""
assert os.path.isabs(os.path.dirname(sample_image))
assert os.path.isfile(sample_image)
trim_section = None
# TODO (simon): Consider binning and possibly ROIs for trim section
log.warning('Determining trim section. Assuming you have only one '
'kind of data in this folder')
ccd = read_fits(sample_image, technique=technique)
# serial binning - dispersion binning
# parallel binning - spatial binning
spatial_length, dispersion_length = ccd.data.shape
serial_binning, \
parallel_binning = [int(x) for x
in ccd.header['CCDSUM'].split()]
# Trim section is valid for Blue and Red Camera Binning 1x1 and
# Spectroscopic ROI
if technique == 'Spectroscopy':
# left
low_lim_spectral = int(np.ceil(51. / serial_binning))
# right
high_lim_spectral = int(4110 / serial_binning)
# bottom
low_lim_spatial = 2
# top
# t = int(1896 / parallel_binning)
# TODO (simon): Need testing
# trim_section = '[{:d}:{:d},{:d}:{:d}]'.format(l, r, b, t)
trim_section = '[{:d}:{:d},{:d}:{:d}]'.format(
low_lim_spectral,
high_lim_spectral,
low_lim_spatial,
spatial_length)
elif technique == 'Imaging':
trim_section = ccd.header['TRIMSEC']
log.info('Trim Section: %s', trim_section)
return trim_section
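# Illustrative usage of `define_trim_section` (sketch; the sample image path
# is hypothetical):
#
#     trim_section = define_trim_section(
#         sample_image='/data/raw/0001_science.fits',
#         technique='Spectroscopy')
#     # returns a trim section string in the format '[x1:x2,y1:y2]'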
def extraction(ccd,
target_trace,
spatial_profile,
extraction_name):
"""Calls appropriate spectrum extraction routine
This function calls the appropriate extraction function based on
`extraction_name`
Notes:
Optimal extraction is not implemented.
Args:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` containing a
2D spectrum
target_trace (object): Instance of astropy.modeling.Model, a low order
polynomial that defines the trace of the spectrum in the ccd object.
spatial_profile (Model): Instance of :class:`~astropy.modeling.Model`,
a Gaussian model previously fitted to the spatial profile of the 2D
spectrum contained in the ccd object.
extraction_name (str): Extraction type, can be `fractional` or
`optimal` though the optimal extraction is not implemented yet.
Returns:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` containing a
1D spectrum. The attribute 'data' is replaced by the 1D array resulted
from the extraction process.
Raises:
NotImplementedError: When `extraction_name` is `optimal`.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert isinstance(target_trace, Model)
if spatial_profile.__class__.name == 'Gaussian1D':
target_fwhm = spatial_profile.fwhm
elif spatial_profile.__class__.name == 'Moffat1D':
target_fwhm = spatial_profile.fwhm
else:
raise NotImplementedError
if extraction_name == 'fractional':
extracted, background, bkg_info = extract_fractional_pixel(
ccd=ccd,
target_trace=target_trace,
target_fwhm=target_fwhm,
extraction_width=2)
background_1, background_2 = bkg_info
if background_1 is not None:
log.info('Background extraction zone 1: {:s}'.format(background_1))
extracted.header.set('GSP_BKG1', value=background_1)
else:
log.info("Background extraction zone 1: 'none'")
if background_2 is not None:
log.info('Background extraction zone 2: {:s}'.format(background_2))
extracted.header.set('GSP_BKG2', value=background_2)
else:
log.info("Background extraction zone 2: 'none'")
return extracted
elif extraction_name == 'optimal':
raise NotImplementedError
def extract_fractional_pixel(ccd, target_trace, target_fwhm, extraction_width,
background_spacing=3):
"""Performs an spectrum extraction using fractional pixels.
Args:
ccd (CCDData) Instance of :class:`~astropy.nddata.CCDData` that
contains a 2D spectrum.
target_trace (object): Instance of astropy.modeling.models.Model that
defines the trace of the target on the image (ccd).
target_fwhm (float): FWHM value for the spatial profile
fitted to the target.
extraction_width (int): Width of the extraction area as a function of
`target_fwhm`. For instance if `extraction_width` is set to 1 the
function extracts 0.5 times `target_fwhm` to each side from the center
of the traced target.
background_spacing (float): Number of `target_fwhm` units separating the
target extraction zone from the background region, measured from the
edge of the extraction zone to the edge of the background region.
Returns:
The extracted 1D spectrum as a :class:`~astropy.nddata.CCDData` instance,
an array with the background value subtracted from each column and a list
describing the two background extraction zones.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert isinstance(target_trace, Model)
log.info("Fractional Pixel Extraction for "
"{:s}".format(ccd.header['GSP_FNAM']))
spat_length, disp_length = ccd.data.shape
disp_axis = range(disp_length)
trace_points = target_trace(disp_axis)
apnum1 = None
background_info_1 = None
background_info_2 = None
non_background_sub = []
extracted_spectrum = []
background_list = []
if ccd.header['OBSTYPE'] not in ['OBJECT', 'SPECTRUM']:
log.debug("No background subtraction for OBSTYPE = "
"{:s}".format(ccd.header['OBSTYPE']))
for i in disp_axis:
# this defines the extraction limit for every column
low_limit = trace_points[i] - 0.5 * extraction_width * target_fwhm
high_limit = trace_points[i] + 0.5 * extraction_width * target_fwhm
if apnum1 is None:
# TODO (simon): add secondary targets
apnum1 = '{:d} {:d} {:.2f} {:.2f}'.format(1,
1,
low_limit,
high_limit)
ccd.header.set('APNUM1',
value=apnum1,
comment="Aperture in first column")
ccd.header.set('GSP_EXTR',
value="{:.2f}:{:.2f}".format(low_limit,
high_limit))
log.info("Extraction aperture in first column: {:s}".format(
ccd.header['GSP_EXTR']))
column_sum = fractional_sum(data=ccd.data,
index=i,
low_limit=low_limit,
high_limit=high_limit)
non_background_sub.append(column_sum)
if ccd.header['OBSTYPE'] in ['OBJECT', 'SPECTRUM']:
# background limits
# background_spacing is the distance from the edge of the target's
# limits defined by `int_low_limit` and
# `int_high_limit` in stddev units
background_width = high_limit - low_limit
# define pixel values for background subtraction
# low_background_zone
high_1 = low_limit - background_spacing * target_fwhm
low_1 = high_1 - background_width
# High background zone
low_2 = high_limit + background_spacing * target_fwhm
high_2 = low_2 + background_width
# validate background subtraction zones
background_1 = None
background_2 = None
# this has to be implemented, leaving it True assumes there is no
# restriction for background selection.
# TODO (simon): Implement background subtraction zones validation
neighbouring_target_condition = True
if low_1 > 0 and neighbouring_target_condition:
# integer limits
background_1 = fractional_sum(data=ccd.data,
index=i,
low_limit=low_1,
high_limit=high_1)
else:
log.error("Invalid Zone 1: [{}:{}]".format(low_1, high_1))
if high_2 < spat_length and neighbouring_target_condition:
background_2 = fractional_sum(data=ccd.data,
index=i,
low_limit=low_2,
high_limit=high_2)
else:
log.error("Invalid Zone 2: [{}:{}]".format(low_2, high_2))
# background = 0
if background_1 is not None and background_2 is None:
background = background_1
if background_info_1 is None:
background_info_1 = "{:.2f}:{:.2f} column {:d}".format(
low_1, high_1, i+1)
elif background_1 is None and background_2 is not None:
background = background_2
if background_info_2 is None:
background_info_2 = "{:.2f}:{:.2f} column {:d}".format(
low_2, high_2, i+1)
else:
background = np.mean([background_1, background_2])
if background_info_1 is None:
background_info_1 = "{:.2f}:{:.2f} column {:d}".format(
low_1, high_1, i+1)
if background_info_2 is None:
background_info_2 = "{:.2f}:{:.2f} column {:d}".format(
low_2, high_2, i+1)
# actual background subtraction
background_subtracted_column_sum = column_sum - background
# append column value to list
extracted_spectrum.append(background_subtracted_column_sum)
background_list.append(background)
else:
extracted_spectrum.append(column_sum)
new_ccd = ccd.copy()
new_ccd.data = np.asarray(extracted_spectrum)
log.warning("Setting mask to None, otherwise saving will fail.")
new_ccd.mask = None
if new_ccd.header['NAXIS'] != 1:
for i in range(int(new_ccd.header['NAXIS']), 1, -1):
new_ccd.header.remove(keyword="NAXIS{:d}".format(i))
new_ccd.header.set('NAXIS', value=1)
return new_ccd, np.asarray(background_list), [background_info_1,
background_info_2]
def extract_optimal():
"""Placeholder for optimal extraction method.
Raises:
NotImplementedError
"""
raise NotImplementedError
def evaluate_wavelength_solution(clipped_differences):
"""Calculates Root Mean Square Error for the wavelength solution.
Args:
clipped_differences (ndarray): Numpy masked array of differences
between reference line values in angstrom and the value calculated
using the model of the wavelength solution.
Returns:
Root Mean Square Error, number of points and number of points
rejected in the calculation of the wavelength solution.
"""
n_points = len(clipped_differences)
n_rejections = np.ma.count_masked(clipped_differences)
square_differences = []
for i in range(len(clipped_differences)):
if clipped_differences[i] is not np.ma.masked:
square_differences.append(clipped_differences[i] ** 2)
rms_error = np.sqrt(
np.sum(square_differences) / len(square_differences))
log.info('Wavelength solution RMS Error : {:.3f}'.format(
rms_error))
return rms_error, n_points, n_rejections
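# Illustrative usage of `evaluate_wavelength_solution` (sketch with a small
# synthetic masked array of angstrom differences):
#
#     differences = np.ma.masked_values([0.2, -0.3, 0.1, -999., 0.05], -999.)
#     rms_error, n_points, n_rejections = evaluate_wavelength_solution(
#         clipped_differences=differences)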
def fix_keywords(path, pattern='*.fits'):
"""Fix FITS uncompliance of some keywords
Uses automatic header fixing by :class:`~astropy.nddata.CCDData`. Note that
this only fixes FITS compliance.
Args:
path (str): Path to raw data
pattern (str): Search pattern for listing file in path.
"""
file_list = glob.glob(os.path.join(path, pattern))
for _file in file_list:
log.info("Fixing file {:s}".format(_file))
ccd = ccdproc.CCDData.read(_file, unit='adu')
ccd.write(_file, overwrite=True)
log.info("Fix succeeded!")
def fractional_sum(data, index, low_limit, high_limit):
"""Performs a fractional pixels sum
A fractional pixels sum is required several times while
extracting a 1D spectrum from a 2D spectrum. The method
is actually very simple.
It requires the full data, the column and the range to sum, where the
range is given as real numbers. First it separates the limit values into
integer and fractional parts. Then it sums over the integer interval,
subtracts the fraction of the pixel below `low_limit` and adds the
fraction of the pixel above the integer part of `high_limit`.
The sum is performed in one operation. It does not do
background subtraction; instead, this very same method is used to
get the background sum that is subtracted later.
Args:
data (numpy.ndarray): 2D array that contains the 2D spectrum/image
index (int): Index of the column to be summed.
low_limit (float): Lower limit for the range to be summed.
high_limit (float): Higher limit for the range to be summed.
Returns:
Sum in ADU of all pixels and fractions between `low_limit` and
`high_limit`.
"""
# these are the limits within the full amount of flux on each pixel is
# summed
low_fraction, low_integer = math.modf(low_limit)
high_fraction, high_integer = math.modf(high_limit)
column_sum = np.sum(data[int(low_integer):int(high_integer), index]) - \
data[int(low_integer), index] * low_fraction + \
data[int(high_integer), index] * high_fraction
return column_sum
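# Worked example of `fractional_sum` (sketch with small synthetic data):
#
#     data = np.ones((10, 5))
#     # summing column 2 between rows 2.5 and 6.25:
#     # 0.5 of pixel 2, full pixels 3 to 5 and 0.25 of pixel 6
#     # -> 0.5 + 3 + 0.25 = 3.75
#     fractional_sum(data=data, index=2, low_limit=2.5, high_limit=6.25)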
def get_best_flat(flat_name, path):
"""Look for matching master flat
Given a basename for master flats defined as a combination of key parameters
extracted from the header of the image that we want to flat field, this
function will find the name of the files that matches the base name and then
will choose the first. Ideally this should go further as to check signal,
time gap, etc.
After it identifies the file it will load it using
:class:`~astropy.nddata.CCDData` and return it along the filename.
In the case it fails it will return None instead of master_flat and another
None instead of master_flat_name.
Args:
flat_name (str): Full path of master flat basename. Ends in '*.fits'
for using glob.
path (str): Location to look for flats.
Returns:
master_flat (object): A :class:`~astropy.nddata.CCDData` instance.
master_flat_name (str): Full path to the chosen master flat.
"""
flat_list = glob.glob(os.path.join(path, flat_name))
log.debug('Flat base name {:s}'.format(flat_name))
log.debug('Matching master flats found: {:d}'.format(len(flat_list)))
if len(flat_list) > 0:
master_flat_name = flat_list[0]
# if len(flat_list) == 1:
# master_flat_name = flat_list[0]
# else:
# master_flat_name = flat_list[0]
# elif any('dome' in flat for flat in flat_list):
# master_flat_name =
master_flat = ccdproc.CCDData.read(master_flat_name, unit=u.adu)
log.debug('Found suitable master flat: {:s}'.format(master_flat_name))
return master_flat, master_flat_name
else:
log.error('There is no flat available')
return None, None
def get_central_wavelength(grating, grt_ang, cam_ang):
"""Calculates the central wavelength for a given spectroscopic mode
The equation used to calculate the central wavelength is the following
.. math::
\\lambda_{central} = \\frac{1e6}{GRAT}
\\sin\\left(\\frac{\\alpha \\pi}{180}\\right) +
\\sin\\left(\\frac{\\beta \\pi}{180}\\right)
Args:
grating (str): Grating frequency as a string. Example '400'.
grt_ang (str): Grating Angle as a string. Example '12.0'.
cam_ang (str): Camera Angle as a string. Example '20.0'
Returns:
central_wavelength (float): Central wavelength as a float value.
"""
grating_frequency = float(grating) / u.mm
grt_ang = float(grt_ang) * u.deg
cam_ang = float(cam_ang) * u.deg
alpha = grt_ang.to(u.rad)
beta = cam_ang.to(u.rad) - grt_ang.to(u.rad)
# central_wavelength = (1e6 / grating_frequency) * \
# (np.sin(alpha * np.pi / 180.) +
# np.sin(beta * np.pi / 180.))
central_wavelength = (np.sin(alpha) + np.sin(beta)) / grating_frequency
central_wavelength = central_wavelength.to(u.angstrom)
log.debug('Found {:.3f} as central wavelength'.format(central_wavelength))
return central_wavelength
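# Illustrative usage of `get_central_wavelength` (sketch; the values mimic a
# hypothetical 400 l/mm configuration):
#
#     central_wavelength = get_central_wavelength(grating='400',
#                                                 grt_ang='7.5',
#                                                 cam_ang='16.1')
#     # the result is an astropy Quantity expressed in angstrom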
def get_lines_in_lamp(ccd, plots=False):
"""Identify peaks in a lamp spectrum
Uses `signal.argrelmax` to find peaks in a spectrum, i.e. emission
lines, then calls the `recenter_lines` method to recenter them
using a "center of mass" approach, because the maximum value (peak)
is not always the center of the line.
Args:
ccd (CCDData): Lamp `ccdproc.CCDData` instance.
plots (bool): Whether to plot or not.
Returns:
lines_center (list): A list containing pixel values at the
approximate location of the lines.
"""
if isinstance(ccd, ccdproc.CCDData):
lamp_data = ccd.data
lamp_header = ccd.header
raw_pixel_axis = range(len(lamp_data))
else:
log.error('Error receiving lamp')
return None
no_nan_lamp_data = np.asarray(np.nan_to_num(lamp_data))
filtered_data = np.where(
np.abs(no_nan_lamp_data > no_nan_lamp_data.min() +
0.03 * no_nan_lamp_data.max()),
no_nan_lamp_data,
None)
# replace None to zero and convert it to an array
none_to_zero = [0 if it is None else it for it in filtered_data]
filtered_data = np.array(none_to_zero)
_upper_limit = no_nan_lamp_data.min() + 0.03 * no_nan_lamp_data.max()
slit_size = np.float64(re.sub('[a-zA-Z"_*]', '', lamp_header['slit']))
serial_binning, parallel_binning = [
int(x) for x in lamp_header['CCDSUM'].split()]
new_order = int(round(float(slit_size) / (0.15 * serial_binning)))
log.debug('New Order: {:d}'.format(new_order))
peaks = signal.argrelmax(filtered_data, axis=0, order=new_order)[0]
if slit_size >= 5.:
lines_center = recenter_broad_lines(
lamp_data=no_nan_lamp_data,
lines=peaks,
order=new_order)
else:
# lines_center = peaks
lines_center = recenter_lines(no_nan_lamp_data, peaks)
if plots: # pragma: no cover
plt.close('all')
fig, ax = plt.subplots()
fig.canvas.set_window_title('Lines Detected')
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title('Lines detected in Lamp\n'
'{:s}'.format(lamp_header['OBJECT']))
ax.set_xlabel('Pixel Axis')
ax.set_ylabel('Intensity (counts)')
# Build legends without data to avoid repetitions
ax.plot([], color='k', label='Comparison Lamp Data')
ax.plot([], color='k', linestyle=':',
label='Spectral Line Detected')
ax.axhline(_upper_limit, color='r')
for line in peaks:
ax.axvline(line, color='k', linestyle=':')
ax.plot(raw_pixel_axis, no_nan_lamp_data, color='k')
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return lines_center
def get_overscan_region(sample_image, technique):
"""Get the right overscan region for spectroscopy
It works for the following ROI:
Spectroscopic 1x1
Spectroscopic 2x2
Spectroscopic 3x3
The limits were measured on a Spectroscopic 1x1 image and then divided
by the binning size. It was checked that this actually works as expected.
Notes:
The regions are 1-based, i.e. different from Python's convention.
For Imaging there is no overscan region.
Args:
sample_image (str): Full path to randomly chosen image.
technique (str): Observing technique, either `Spectroscopy` or
`Imaging`
Returns:
overscan_region (str): Region for overscan in the format
'[min:max,:]' where min is the starting point and max is the end
point of the overscan region.
"""
assert os.path.isabs(os.path.dirname(sample_image))
assert os.path.isfile(sample_image)
log.debug('Overscan Sample File ' + sample_image)
ccd = ccdproc.CCDData.read(sample_image, unit=u.adu)
# Image height - spatial direction
spatial_length, dispersion_length = ccd.data.shape
# Image width - spectral direction
# w = ccd.data.shape[1]
# Take the binnings
serial_binning, parallel_binning = \
[int(x) for x in ccd.header['CCDSUM'].split()]
if technique == 'Spectroscopy':
log.info('Overscan regions have been tested for ROI '
'Spectroscopic 1x1, 2x2 and 3x3')
# define l r b and t to avoid local variable might be
# defined before assignment warning
low_lim_spectral, \
high_lim_spectral, \
low_lim_spatial, \
high_lim_spatial = [None] * 4
if ccd.header['INSTCONF'] == 'Red':
# for red camera it is necessary to eliminate the first
# rows/columns (depends on the point of view) because
# they come with an abnormal high signal. Usually the
# first 5 pixels. In order to find the corresponding
# value for the subsequent binning divide by the
# binning size.
# The numbers 6 and 49 were obtained from visual
# inspection
# left
low_lim_spectral = int(np.ceil(6. / serial_binning))
# right
high_lim_spectral = int(49. / serial_binning)
# bottom
low_lim_spatial = 1
# top
high_lim_spatial = spatial_length
elif ccd.header['INSTCONF'] == 'Blue':
# 16 is the length of the overscan region with no
# binning.
# left
low_lim_spectral = 1
# right
high_lim_spectral = int(16. / serial_binning)
# bottom
low_lim_spatial = 1
# top
high_lim_spatial = spatial_length
overscan_region = '[{:d}:{:d},{:d}:{:d}]'.format(
low_lim_spectral,
high_lim_spectral,
low_lim_spatial,
high_lim_spatial)
elif technique == 'Imaging':
log.warning("Imaging mode doesn't have overscan "
"region. Use bias instead.")
overscan_region = None
else:
overscan_region = None
log.info('Overscan Region: %s', overscan_region)
return overscan_region
def get_slit_trim_section(master_flat, debug_plots=False):
"""Find the slit edges to trim all data
Using a master flat, ideally with a good signal-to-noise ratio, this function
will identify the edges of the slit projected onto the detector. Having this
done allows reducing the overall processing time and also reduces the
introduction of artifacts due to non-illuminated regions of the detector,
such as NaNs, -INF, +INF, etc.
Args:
master_flat (CCDData): A :class:`~astropy.nddata.CCDData` instance.
debug_plots (bool): Flag to show debugging plots
Returns:
slit_trim_section (str): Trim section in spatial direction in the format
[:,slit_lower_limit:slit_higher_limit]
"""
x, y = master_flat.data.shape
# Using the middle point to make calculations, usually flats have good
# illumination already at the middle.
middle = int(y / 2.)
ccd_section = master_flat.data[:, middle:middle + 200]
ccd_section_median = np.median(ccd_section, axis=1)
spatial_axis = range(len(ccd_section_median))
# set values for initial box model definition
box_max = np.max(ccd_section_median)
box_center = len(ccd_section_median) / 2.
box_width = .75 * len(ccd_section_median)
# box model definition
box_model = models.Box1D(amplitude=box_max, x_0=box_center, width=box_width)
box_fitter = fitting.SimplexLSQFitter()
fitted_box = box_fitter(box_model, spatial_axis, ccd_section_median)
# the number of pixels that will be removed from the detected edge of the
# image on each side
offset = 10
if fitted_box.width.value < x:
log.debug("Slit detected. Adding a 10 pixels offset")
else:
log.debug("Slit limits not detected. Setting additional "
"offset to 0")
offset = 0
# Here we force the slit limits within the boundaries of the data (image)
# this defines a preliminary set of slit limit
l_lim = 1 + fitted_box.x_0.value - 0.5 * fitted_box.width.value + offset
h_lim = 1 + fitted_box.x_0.value + 0.5 * fitted_box.width.value - offset
low_lim = int(np.max([1 + offset, l_lim + 1]))
high_lim = int(np.min([h_lim, len(ccd_section_median) - offset]))
# define the slit trim section as (IRAF)
# convert o 1-based
slit_trim_section = '[1:{:d},{:d}:{:d}]'.format(y,
low_lim,
high_lim)
log.debug("Slit Trim Section: {:s}".format(slit_trim_section))
# debugging plots that have to be manually turned on
if debug_plots: # pragma: no cover
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
plt.title('Slit Edge Detection')
plt.plot(box_model(spatial_axis), color='c', label='Initial Box1D')
plt.plot(fitted_box(spatial_axis), color='k', label='Fitted Box1D')
plt.plot(ccd_section_median, label='Median Along Disp.')
# plt.plot(pseudo_derivative, color='g', label='Pseudo Derivative')
# plt.axvline(None, color='r', label='Detected Edges')
# -1 to make it zero-based.
plt.axvline(low_lim - 1, color='r', label='Detected Edges')
plt.axvline(high_lim - 1, color='r')
# for peak in peaks:
# plt.axvline(peak, color='r')
plt.legend(loc='best')
plt.show()
return slit_trim_section
def get_spectral_characteristics(ccd, pixel_size, instrument_focal_length):
"""Calculates some Goodman's specific spectroscopic values.
From the header value for Grating, Grating Angle and Camera Angle it is
possible to estimate what are the wavelength values at the edges as well
as in the center. It was necessary to add offsets though, since the
formulas provided are slightly off. The values are only an estimate.
Args:
ccd (CCDData): Lamp `ccdproc.CCDData` instance
pixel_size (float): Pixel size in microns
instrument_focal_length (float): Instrument focal length
Returns:
spectral_characteristics (dict): Contains the following parameters:
center: Center Wavelength
blue: Blue limit in Angstrom
red: Red limit in Angstrom
alpha: Angle
beta: Angle
pix1: Pixel One
pix2: Pixel Two
"""
# TODO (simon): find a definite solution for this, this only work
# TODO (simon): (a little) for one configuration
blue_correction_factor = -50 * u.angstrom
red_correction_factor = -37 * u.angstrom
grating_frequency = float(re.sub('[A-Za-z_-]',
'',
ccd.header['GRATING'])) / u.mm
grating_angle = float(ccd.header['GRT_ANG']) * u.deg
camera_angle = float(ccd.header['CAM_ANG']) * u.deg
# serial binning - dispersion binning
# parallel binning - spatial binning
serial_binning, parallel_binning = [
int(x) for x in ccd.header['CCDSUM'].split()]
pixel_count = len(ccd.data)
# Calculations
# TODO (simon): Check whether is necessary to remove the
# TODO (simon): slit_offset variable
alpha = grating_angle.to(u.rad)
beta = camera_angle.to(u.rad) - grating_angle.to(u.rad)
center_wavelength = (np.sin(alpha) + np.sin(beta)) / grating_frequency
limit_angle = np.arctan(pixel_count * ((pixel_size * serial_binning) / instrument_focal_length) / 2)
blue_limit = ((np.sin(alpha) + np.sin(beta - limit_angle.to(u.rad))) / grating_frequency).to(u.angstrom) + blue_correction_factor
red_limit = ((np.sin(alpha) + np.sin(beta + limit_angle.to(u.rad))) / grating_frequency).to(u.angstrom) + red_correction_factor
pixel_one = 0
pixel_two = 0
log.debug(
'Center Wavelength : {:.3f} Blue Limit : '
'{:.3f} Red Limit : {:.3f} '.format(center_wavelength.to(u.angstrom),
blue_limit,
red_limit))
spectral_characteristics = {'center': center_wavelength,
'blue': blue_limit,
'red': red_limit,
'alpha': alpha,
'beta': beta,
'pix1': pixel_one,
'pix2': pixel_two}
return spectral_characteristics
def get_twilight_time(date_obs):
"""Get end/start time of evening/morning twilight
Notes:
Taken from <NAME>'s development
Args:
date_obs (list): List of all the dates from data.
Returns:
twilight_evening (str): Evening twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
twilight_morning (str): Morning twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
sun_set_time (str): Sun set time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
sun_rise_time (str): Sun rise time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
"""
# observatory(str): Observatory name.
observatory = 'SOAR Telescope'
geodetic_location = ['-70d44m01.11s', '-30d14m16.41s', 2748]
# longitude (str): Geographic longitude in string format
longitude = geodetic_location[0]
# latitude (str): Geographic latitude in string format.
latitude = geodetic_location[1]
# elevation (int): Geographic elevation in meters above sea level
elevation = geodetic_location[2]
# timezone (str): Time zone.
timezone = 'UTC'
# description(str): Observatory description
description = 'Soar Telescope on Cerro Pachon, Chile'
soar_loc = EarthLocation.from_geodetic(longitude,
latitude,
elevation * u.m,
ellipsoid='WGS84')
soar = Observer(name=observatory,
location=soar_loc,
timezone=timezone,
description=description)
time_first_frame, time_last_frame = Time(min(date_obs)), Time(
max(date_obs))
twilight_evening = soar.twilight_evening_astronomical(
Time(time_first_frame), which='nearest').isot
twilight_morning = soar.twilight_morning_astronomical(
Time(time_last_frame), which='nearest').isot
sun_set_time = soar.sun_set_time(
Time(time_first_frame), which='nearest').isot
sun_rise_time = soar.sun_rise_time(
Time(time_last_frame), which='nearest').isot
log.debug('Sun Set ' + sun_set_time)
log.debug('Sun Rise ' + sun_rise_time)
return (twilight_evening,
twilight_morning,
sun_set_time,
sun_rise_time)
def identify_targets(ccd,
fit_model,
background_threshold,
nfind=3,
profile_min_width=None,
profile_max_width=None,
plots=False):
"""Identify Spectroscopic Targets
Wrapper to the class `IdentifySpectroscopicTargets`.
Args:
ccd (CCDData): Image containing spectra
fit_model (str): Name of the model to be fitted, `moffat` or `gaussian`.
background_threshold (int): Number of background levels for target
discrimination.
nfind (int): Maximum number of targets passing the background threshold
to be returned, ordered from the most intense peak to the least intense.
profile_min_width (float): Minimum FWHM (moffat) or STDDEV (gaussian) for the spatial profile model.
profile_max_width (float): Maximum FWHM (moffat) or STDDEV (gaussian) for the spatial profile model.
plots (bool): Flag for plotting results.
Returns:
identified_targets (list): List of models successfully fitted.
"""
identify = IdentifySpectroscopicTargets()
identified_targets = identify(ccd=ccd,
nfind=nfind,
background_threshold=background_threshold,
model_name=fit_model,
profile_min_width=profile_min_width,
profile_max_width=profile_max_width,
plots=plots)
return identified_targets
def identify_technique(target, obstype, slit, grating, wavmode, roi):
"""Identify whether is Imaging or Spectroscopic data
Args:
target (str): Target name as in the keyword `OBJECT` this is useful in
Automated aquisition mode, such as AEON.
obstype (str): Observation type as in `OBSTYPE`
slit (str): Value of `SLIT` keyword.
grating (str): Value of `GRATING` keyword.
wavmode (str): Value of `WAVMODE` keyword.
roi (str): Value of `ROI` keyword.
Returns:
Observing technique as a string. Either `Imaging` or `Spectroscopy`.
"""
if 'Spectroscopic' in roi or \
obstype in ['ARC', 'SPECTRUM', 'COMP'] or \
slit not in ['NO_MASK', '<NO MASK>'] or \
grating not in ['NO_GRATING', '<NO GRATING>'] or \
'_SP_' in target:
technique = 'Spectroscopy'
elif 'Imaging' in roi or \
obstype in ['EXPOSE'] or\
wavmode == 'IMAGING' or '_IM_' in target:
technique = 'Imaging'
else:
technique = 'Unknown'
return technique
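# Illustrative usage of `identify_technique` (sketch; the keyword values are
# hypothetical examples of header content):
#
#     technique = identify_technique(target='NGC0000_SP_400M1',
#                                    obstype='SPECTRUM',
#                                    slit='1.0_LONG_SLIT',
#                                    grating='SYZY_400',
#                                    wavmode='400_M1',
#                                    roi='Spectroscopic 2x2')
#     # -> 'Spectroscopy'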
def image_overscan(ccd, overscan_region, add_keyword=False):
"""Apply overscan correction to data
Uses ccdproc.subtract_overscan to perform the task.
Notes:
The overscan_region argument uses FITS convention, just like IRAF,
therefore it is 1-based, i.e. it starts at 1 not 0.
Args:
ccd (CCDData) A :class:`~astropy.nddata.CCDData` instance to be
overscan corrected.
overscan_region (str): The overscan region in the format `[x1:x2,y1:y2]`
where x is the spectral axis and y is the spatial axis.
add_keyword (bool): Tells ccdproc whether to add a keyword or not.
Default False.
Returns:
ccd (CCDData) Overscan corrected :class:`~astropy.nddata.CCDData`
instance
"""
if overscan_region is not None:
log.debug(
'Applying overscan Correction: {:s}'.format(overscan_region))
ccd = ccdproc.subtract_overscan(ccd=ccd,
median=True,
fits_section=overscan_region,
add_keyword=add_keyword)
ccd.header['GSP_OVER'] = (overscan_region, 'Overscan region')
else:
log.debug("Overscan region is None, returning the original data.")
# ccd.header['GSP_OVER'] = ('none', 'Overscan region')
return ccd
def image_trim(ccd, trim_section, trim_type='trimsec', add_keyword=False):
"""Trim image to a given section
Notes:
The trim_section argument uses FITS convention, just like IRAF,
therefore it is 1-based, i.e. it starts at 1 not 0.
Args:
ccd (CCDData) A :class:`~astropy.nddata.CCDData` instance.
trim_section (str): The trimming section in the format `[x1:x2,y1:y2]`
where x is the spectral axis and y is the spatial axis.
trim_type (str): trimsec or slit trim.
add_keyword (bool): Tells ccdproc whether to add a keyword or not.
Default False.
Returns:
ccd (CCDData) Trimmed :class:`~astropy.nddata.CCDData` instance
"""
if trim_section is not None:
ccd = ccdproc.trim_image(ccd=ccd,
fits_section=trim_section,
add_keyword=add_keyword)
if trim_type == 'trimsec':
ccd.header['GSP_TRIM'] = (trim_section, 'Trim section from TRIMSEC')
elif trim_type == 'slit':
ccd.header['GSP_SLIT'] = (trim_section,
'Slit trim section, slit illuminated '
'area only.')
else:
log.warning('Unrecognized trim type: {}'.format(trim_type))
ccd.header['GSP_TRIM'] = (trim_section,
'Image trimmed by unrecognized method: '
'{:s}'.format(trim_type))
else:
log.info("{:s} trim section is not "
"defined.".format(trim_type.capitalize()))
log.debug("Trim section is None, returning the same data.")
return ccd
def interpolate_spectrum(spectrum, interpolation_size):
"""Creates an interpolated version of the input spectrum
This method creates an interpolated version of the input array. It is
used mainly for a spectrum but it can also be used with any
one-dimensional array. The reason for doing interpolation is
that it allows finding the lines and their respective centers more
precisely.
Args:
spectrum (array): an uncalibrated spectrum or any unidimensional
array.
interpolation_size (int): Number of points to interpolate. (points added
between two existing ones)
Returns:
Two dimensional array containing x-axis and interpolated array.
The x-axis preserves original pixel values.
"""
x_axis = range(spectrum.size)
first_x = x_axis[0]
last_x = x_axis[-1]
new_x_axis = np.linspace(first_x,
last_x,
spectrum.size * interpolation_size)
tck = interpolate.splrep(x_axis, spectrum, s=0)
new_spectrum = interpolate.splev(new_x_axis, tck, der=0)
return [new_x_axis, new_spectrum]
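# Illustrative usage of `interpolate_spectrum` (sketch with a synthetic
# spectrum):
#
#     spectrum = np.random.random(1000)
#     new_x_axis, new_spectrum = interpolate_spectrum(spectrum,
#                                                     interpolation_size=200)
#     # both returned arrays have 1000 * 200 points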
def is_file_saturated(ccd, threshold):
"""Detects a saturated image
It counts the number of pixels above the saturation level, then finds
the percentage they represent and, if it is above the threshold, it
returns True. The percentage threshold can be set using the command
line argument ``--saturation_threshold``.
Args:
ccd (CCDData): Image to be tested for saturation_threshold
threshold (float): Percentage of saturated pixels allowed. Default 1.
Returns:
True for saturated and False for non-saturated
"""
saturation_values = SaturationValues()
pixels_above_saturation = np.count_nonzero(
ccd.data[np.where(
ccd.data > saturation_values.get_saturation_value(
ccd=ccd))])
total_pixels = np.count_nonzero(ccd.data)
saturated_percent = (pixels_above_saturation * 100.) / total_pixels
if saturated_percent >= float(threshold):
log.warning(
"The current image has more than {:.2f} percent "
"of pixels above saturation_threshold level".format(float(threshold)))
return True
else:
return False
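# Illustrative usage of `is_file_saturated` (sketch; `ccd` stands for a
# previously loaded flat image):
#
#     if is_file_saturated(ccd=ccd, threshold=1.):
#         log.warning('Discarding saturated flat {}'.format(
#             ccd.header['GSP_FNAM']))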
def linearize_spectrum(data, wavelength_solution, plots=False):
"""Produces a linearized version of the spectrum
Storing wavelength solutions in a FITS header is not simple at all for
non-linear solutions, therefore it is easier for the final user and for the
development of the code to have the spectrum linearized. It first finds a
spline representation of the data, then creates a linear wavelength axis
(angstrom) and finally resamples the data from the spline
representation onto the linear wavelength axis.
It also applies a median filter of kernel size three to smooth the
linearized spectrum. Sometimes the splines produce artifacts when
the original data is too steep.
Args:
data (Array): The non-linear spectrum
wavelength_solution (object): Mathematical model representing the
wavelength solution.
plots (bool): Whether to show the plots or not.
Returns:
linear_data (list): Contains two elements: Linear wavelength axis
and the smoothed linearized data itself.
"""
pixel_axis = range(len(data))
if any(np.isnan(data)):
log.error("there are nans")
sys.exit(0)
if wavelength_solution is not None:
x_axis = wavelength_solution(pixel_axis)
try: # pragma: no cover
plt.imshow(data)
plt.show()
except TypeError:
pass
new_x_axis = np.linspace(x_axis[0], x_axis[-1], len(data))
tck = interpolate.splrep(x_axis, data, s=0)
linearized_data = interpolate.splev(new_x_axis, tck, der=0)
smoothed_linearized_data = signal.medfilt(linearized_data)
if plots: # pragma: no cover
fig6 = plt.figure(6)
plt.xlabel('Wavelength (Angstrom)')
plt.ylabel('Intensity (Counts)')
fig6.canvas.set_window_title('Linearized Data')
plt.plot(x_axis,
data,
color='k',
label='Data')
plt.plot(new_x_axis,
linearized_data,
color='r',
linestyle=':',
label='Linearized Data')
plt.plot(new_x_axis,
smoothed_linearized_data,
color='m',
alpha=0.5,
label='Smoothed Linearized Data')
fig6.tight_layout()
plt.legend(loc=3)
plt.show()
fig7 = plt.figure(7)
plt.xlabel('Pixels')
plt.ylabel('Angstroms')
fig7.canvas.set_window_title('Wavelength Solution')
plt.plot(x_axis, color='b', label='Non linear wavelength-axis')
plt.plot(new_x_axis, color='r', label='Linear wavelength-axis')
fig7.tight_layout()
plt.legend(loc=3)
plt.show()
linear_data = [new_x_axis, smoothed_linearized_data]
return linear_data
def name_master_flats(header,
technique,
reduced_data,
sun_set,
sun_rise,
evening_twilight,
morning_twilight,
target_name='',
get=False):
"""Defines the name of a master flat or what master flat is compatible
with a given data
Given the header of a flat image this method will look for certain
keywords that are unique to a given instrument configuration therefore
they are used to discriminate compatibility.
It can be used to define a master flat's name when creating it or find
a base name to match existing master flat files thus finding a
compatible one for a given non-flat image.
Args:
header (object): Fits header. Instance of
:class:`~astropy.io.fits.header.Header`
technique (str): Observing technique, either Spectroscopy or
Imaging.
reduced_data (str): Full path to reduced data directory
sun_set (str): Sunset time formatted as "%Y-%m-%dT%H:%M:%S.%f"
sun_rise (str): Sunrise time formatted as "%Y-%m-%dT%H:%M:%S.%f"
evening_twilight (str): End of evening twilight formatted as
"%Y-%m-%dT%H:%M:%S.%f"
morning_twilight (str): Start of morning twilight in the format
"%Y-%m-%dT%H:%M:%S.%f"
target_name (str): Optional science target name to be added to the
master flat name.
        get (bool): This option is used when trying to find a suitable
            master flat for a given image.
Returns:
A master flat name, or basename to find a match among existing
files.
"""
master_flat_name = os.path.join(reduced_data, 'master_flat')
sunset = datetime.datetime.strptime(sun_set,
"%Y-%m-%dT%H:%M:%S.%f")
sunrise = datetime.datetime.strptime(sun_rise,
"%Y-%m-%dT%H:%M:%S.%f")
afternoon_twilight = datetime.datetime.strptime(evening_twilight,
"%Y-%m-%dT%H:%M:%S.%f")
morning_twilight = datetime.datetime.strptime(morning_twilight,
"%Y-%m-%dT%H:%M:%S.%f")
date_obs = datetime.datetime.strptime(header['DATE-OBS'],
"%Y-%m-%dT%H:%M:%S.%f")
if target_name != '':
target_name = '_' + target_name
if not get:
# TODO (simon): There must be a pythonic way to do this
if afternoon_twilight < date_obs < morning_twilight:
dome_sky = '_night'
elif (sunset < date_obs < afternoon_twilight) or \
(morning_twilight < date_obs < sunrise):
dome_sky = '_sky'
else:
dome_sky = '_dome'
else:
dome_sky = '*'
if technique == 'Spectroscopy':
if header['GRATING'] != '<NO GRATING>':
flat_grating = '_' + re.sub('[A-Za-z_ ]',
'',
header['GRATING'])
# self.spec_mode is an instance of SpectroscopicMode
spectroscopic_mode = SpectroscopicMode()
wavmode = spectroscopic_mode(header=header)
else:
flat_grating = '_no_grating'
wavmode = ''
flat_slit = re.sub('[A-Za-z_ ]',
'',
header['SLIT'])
filter2 = header['FILTER2']
if filter2 == '<NO FILTER>':
filter2 = ''
else:
filter2 = '_' + filter2
master_flat_name += target_name \
+ flat_grating \
+ wavmode \
+ filter2 \
+ '_' \
+ flat_slit \
+ dome_sky \
+ '.fits'
elif technique == 'Imaging':
if header['FILTER'] != 'NO_FILTER':
flat_filter = header['FILTER']
elif header['FILTER2'] != 'NO_FILTER':
flat_filter = header['FILTER2']
else:
flat_filter = "NO_FILTER"
flat_filter = re.sub('[- ]', '_', flat_filter)
flat_filter = re.sub('[<> ]', '', flat_filter)
master_flat_name += '_' + flat_filter + dome_sky + '.fits'
return master_flat_name
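# Illustrative sketch with hypothetical header values: a night-time
# spectroscopic flat taken with GRATING='SYZY_400' and no second filter would
# produce a name of the form
#
#     <reduced_data>/master_flat[_target]_400<wavmode>_<slit>_night.fits
#
# while calling with get=True replaces the dome/sky/night suffix by '*', so
# the result can be used as a glob pattern to match existing master flats.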
def normalize_master_flat(master, name, method='simple', order=15):
"""Master flat normalization method
    This function normalizes a master flat in three possible ways:
*mean*: simply divide the data by its mean
*simple*: Calculates the median along the spatial axis in order to obtain
the dispersion profile. Then fits a
:class:`~astropy.modeling.polynomial.Chebyshev1D` model and apply this to
all the data.
*full*: This is for experimental purposes only because it takes a lot of
time to process. It will fit a model to each line along the dispersion axis
and then divide it by the fitted model. I do not recommend this method
unless you have a good reason as well as a very powerful computer.
Args:
master (CCDData): Master flat. Has to be a
:class:`~astropy.nddata.CCDData` instance.
name (str): Full path of master flat prior to normalization.
method (str): Normalization method, 'mean', 'simple' or 'full'.
order (int): Order of the polynomial to be fitted.
    Returns:
        master (CCDData): The normalized master flat, a
            :class:`~astropy.nddata.CCDData` instance.
        norm_name (str): Full path to the file of the normalized master flat.
"""
assert isinstance(master, ccdproc.CCDData)
master = master.copy()
# define new name, base path and full new name
new_name = 'norm_' + os.path.basename(name)
path = os.path.dirname(name)
norm_name = os.path.join(path, new_name)
if method == 'mean':
log.debug('Normalizing by mean')
master.data /= master.data.mean()
master.header['GSP_NORM'] = ('mean', 'Flat normalization method')
elif method == 'simple' or method == 'full':
log.debug('Normalizing flat by {:s} model'.format(method))
# Initialize Fitting models and fitter
model_init = models.Chebyshev1D(degree=order)
model_fitter = fitting.LevMarLSQFitter()
# get data shape
x_size, y_size = master.data.shape
x_axis = range(y_size)
if method == 'simple':
# get profile along dispersion axis to fit a model to use for
# normalization
profile = np.median(master.data, axis=0)
# do the actual fit
fit = model_fitter(model_init, x_axis, profile)
# convert fit into an array
fit_array = fit(x_axis)
# pythonic way to divide an array by a vector
master.data = master.data / fit_array[None, :]
# master.header.add_history('Flat Normalized by simple model')
master.header['GSP_NORM'] = ('simple', 'Flat normalization method')
elif method == 'full':
log.warning('This part of the code was left here for '
'experimental purposes only')
            log.warning('This procedure takes a long time to process, you '
                        'might want to use another method such as "simple" '
                        'or "mean".')
for i in range(x_size):
fit = model_fitter(model_init, x_axis, master.data[i])
master.data[i] = master.data[i] / fit(x_axis)
master.header['GSP_NORM'] = ('full', 'Flat normalization method')
# write normalized flat to a file
write_fits(ccd=master,
full_path=norm_name,
parent_file=name)
return master, norm_name
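# Hypothetical usage sketch (file name for illustration only): normalize a
# previously combined master flat with the default 'simple' method.
#
#     master_flat = ccdproc.CCDData.read('master_flat_400_night.fits',
#                                        unit=u.adu)
#     normalized, norm_path = normalize_master_flat(
#         master=master_flat,
#         name='master_flat_400_night.fits',
#         method='simple',
#         order=15)
#     # norm_path points to the newly written norm_master_flat_400_night.fits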
def ra_dec_to_deg(right_ascension, declination):
"""Converts right ascension and declination to degrees
Args:
right_ascension (str): Right ascension in the format hh:mm:ss.sss
declination (str): Declination in the format dd:mm:ss.sss
Returns:
right_ascension_deg (float): Right ascension in degrees
declination_deg (float): Declination in degrees
"""
right_ascension = right_ascension.split(":")
declination = declination.split(":")
# RIGHT ASCENSION conversion
right_ascension_deg = (float(right_ascension[0])
+ (float(right_ascension[1])
+ (float(right_ascension[2]) / 60.)) / 60.) * \
(360. / 24.)
# DECLINATION conversion
if float(declination[0]) == abs(float(declination[0])):
sign = 1
else:
sign = -1
declination_deg = sign * (abs(float(declination[0]))
+ (float(declination[1])
+ (float(declination[2]) / 60.)) / 60.)
return right_ascension_deg, declination_deg
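# Worked example (coordinate values chosen for illustration only):
#
#     ra_deg, dec_deg = ra_dec_to_deg('05:34:31.94', '-69:45:22.0')
#     # ra_deg  = (5 + (34 + 31.94 / 60.) / 60.) * 15 ~  83.633 degrees
#     # dec_deg = -(69 + (45 + 22.0 / 60.) / 60.)     ~ -69.756 degrees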
def read_fits(full_path, technique='Unknown'):
"""Read fits files while adding important information to the header
It is necessary to record certain data to the image header so that's the
reason for this wrapper of :meth:`~astropy.nddata.CCDData.read` to exist.
    It will add the following keywords. In most cases, if the keyword already
    exists it will be skipped, except for `GSP_FNAM`, `GSP_PATH` and `BUNIT`.
GSP_VERS: Goodman Spectroscopic Pipeline version number
GSP_ONAM: Original File name
GSP_PNAM: Parent file name or name of the file from which this one
originated after some process or just a copy.
GSP_FNAM: Current file name.
GSP_PATH: Path to file at the moment of reading.
GSP_TECH: Observing technique. `Spectroscopy` or `Imaging`.
GSP_DATE: Date of first reading.
GSP_OVER: Overscan region.
GSP_TRIM: Trim section (region).
GSP_SLIT: Slit trim section, obtained from the slit illuminated area.
GSP_BIAS: Master bias image used. Default `none`.
GSP_FLAT: Master flat image used. Default `none`.
GSP_SCTR: Science target file
GSP_NORM: Flat normalization method.
GSP_COSM: Cosmic ray rejection method.
GSP_EXTR: Extraction window at first column
GSP_BKG1: First background extraction zone
GSP_BKG2: Second background extraction zone
GSP_WRMS: Wavelength solution RMS Error.
GSP_WPOI: Number of points used to calculate the wavelength solution
Error.
GSP_WREJ: Number of points rejected.
Args:
full_path (str): Full path to file.
technique (str): Observing technique. 'Imaging' or 'Spectroscopy'.
Returns:
Instance of :class:`~astropy.nddata.CCDData` corresponding to the file
from `full_path`.
"""
assert os.path.isfile(full_path)
ccd = ccdproc.CCDData.read(full_path, unit=u.adu)
all_keys = [key for key in ccd.header.keys()]
ccd.header.set('GSP_VERS',
value=__version__,
comment='Goodman Spectroscopic Pipeline Version')
if 'GSP_ONAM' not in all_keys:
ccd.header.set('GSP_ONAM',
value=os.path.basename(full_path),
comment='Original file name')
if 'GSP_PNAM' not in all_keys:
ccd.header.set('GSP_PNAM',
value=os.path.basename(full_path),
comment='Parent file name')
ccd.header.set('GSP_FNAM',
value=os.path.basename(full_path),
comment='Current file name')
ccd.header.set('GSP_PATH',
value=os.path.dirname(full_path),
comment='Location at moment of reduce')
if 'GSP_TECH' not in all_keys:
ccd.header.set('GSP_TECH',
value=technique,
comment='Observing technique')
if 'GSP_DATE' not in all_keys:
ccd.header.set('GSP_DATE',
value=time.strftime("%Y-%m-%d"),
comment='Processing date')
if 'GSP_OVER' not in all_keys:
ccd.header.set('GSP_OVER',
value='none',
comment='Overscan region')
if 'GSP_TRIM' not in all_keys:
ccd.header.set('GSP_TRIM',
value='none',
comment='Trim section')
if 'GSP_SLIT' not in all_keys:
ccd.header.set('GSP_SLIT',
value='none',
comment='Slit trim section, slit illuminated area only')
if 'GSP_BIAS' not in all_keys:
ccd.header.set('GSP_BIAS',
value='none',
comment='Master bias image')
if 'GSP_FLAT' not in all_keys:
ccd.header.set('GSP_FLAT',
value='none',
comment='Master flat image')
if 'GSP_NORM' not in all_keys:
ccd.header.set('GSP_NORM',
value='none',
comment='Flat normalization method')
if 'GSP_COSM' not in all_keys:
ccd.header.set('GSP_COSM',
value='none',
comment='Cosmic ray rejection method')
if 'GSP_TMOD' not in all_keys:
ccd.header.set('GSP_TMOD',
value='none',
comment='Model name used to fit trace')
if 'GSP_EXTR' not in all_keys:
ccd.header.set('GSP_EXTR',
value='none',
comment='Extraction window at first column')
if 'GSP_BKG1' not in all_keys:
ccd.header.set('GSP_BKG1',
value='none',
comment='First background extraction zone')
if 'GSP_BKG2' not in all_keys:
ccd.header.set('GSP_BKG2',
value='none',
comment='Second background extraction zone')
if 'GSP_WRMS' not in all_keys:
ccd.header.set('GSP_WRMS',
value='none',
comment='Wavelength solution RMS Error')
if 'GSP_WPOI' not in all_keys:
ccd.header.set('GSP_WPOI',
value='none',
comment='Number of points used to '
'calculate wavelength solution')
if 'GSP_WREJ' not in all_keys:
ccd.header.set('GSP_WREJ',
value='none',
comment='Number of points rejected')
if '' not in all_keys:
ccd.header.add_blank('-- Goodman Spectroscopic Pipeline --',
before='GSP_VERS')
ccd.header.add_blank('-- GSP END --', after='GSP_WREJ')
ccd.header.set('BUNIT', after='CCDSUM')
return ccd
def recenter_broad_lines(lamp_data, lines, order):
"""Recenter broad lines
Notes:
This method is used to recenter broad lines only, there is a special
method for dealing with narrower lines.
Args:
lamp_data (ndarray): numpy.ndarray instance. It contains the lamp
data.
lines (list): A line list in pixel values.
order (float): A rough estimate of the FWHM of the lines in pixels
in the data. It is calculated using the slit size divided by the
pixel scale multiplied by the binning.
Returns:
A list containing the recentered line positions.
"""
# TODO (simon): use slit size information for a square function
# TODO (simon): convolution
new_line_centers = []
gaussian_kernel = Gaussian1DKernel(stddev=2.)
lamp_data = convolve(lamp_data, gaussian_kernel)
for line in lines:
lower_index = max(0, int(line - order))
upper_index = min(len(lamp_data), int(line + order))
lamp_sample = lamp_data[lower_index:upper_index]
x_axis = np.linspace(lower_index, upper_index, len(lamp_sample))
line_max = np.max(lamp_sample)
gaussian_model = models.Gaussian1D(amplitude=line_max,
mean=line,
stddev=order)
fit_gaussian = fitting.LevMarLSQFitter()
fitted_gaussian = fit_gaussian(gaussian_model, x_axis, lamp_sample)
new_line_centers.append(fitted_gaussian.mean.value)
return new_line_centers
def recenter_lines(data, lines, plots=False):
"""Finds the centroid of an emission line
    For every line center (pixel value) it will first scan to the left until
    the data stops decreasing (it assumes it is an emission line), and then
    scan to the right until the data stops decreasing too. Having defined
    those limits it uses the line data in between to calculate the centroid.
Notes:
This method is used to recenter relatively narrow lines only, there
is a special method for dealing with broad lines.
Args:
data (ndarray): numpy.ndarray instance. or the data attribute of a
:class:`~astropy.nddata.CCDData` instance.
lines (list): A line list in pixel values.
plots (bool): If True will plot spectral line as well as the input
center and the recentered value.
Returns:
A list containing the recentered line positions.
"""
new_center = []
x_size = data.shape[0]
median = np.median(data)
for line in lines:
# TODO (simon): Check if this definition is valid, so far is not
# TODO (cont..): critical
left_limit = 0
right_limit = 1
condition = True
left_index = int(line)
while condition and left_index - 2 > 0:
if (data[left_index - 1] > data[left_index]) and \
(data[left_index - 2] > data[left_index - 1]):
condition = False
left_limit = left_index
elif data[left_index] < median:
condition = False
left_limit = left_index
else:
left_limit = left_index
left_index -= 1
# id right limit
condition = True
right_index = int(line)
while condition and right_index + 2 < x_size - 1:
if (data[right_index + 1] > data[right_index]) and \
(data[right_index + 2] > data[right_index + 1]):
condition = False
right_limit = right_index
elif data[right_index] < median:
condition = False
right_limit = right_index
else:
right_limit = right_index
right_index += 1
index_diff = [abs(line - left_index), abs(line - right_index)]
sub_x_axis = range(line - min(index_diff),
(line + min(index_diff)) + 1)
sub_data = data[line - min(index_diff):(line + min(index_diff)) + 1]
centroid = np.sum(sub_x_axis * sub_data) / np.sum(sub_data)
# checks for asymmetries
differences = [abs(data[line] - data[left_limit]),
abs(data[line] - data[right_limit])]
if max(differences) / min(differences) >= 2.:
if plots: # pragma: no cover
plt.axvspan(line - 1, line + 1, color='g', alpha=0.3)
new_center.append(line)
else:
new_center.append(centroid)
if plots: # pragma: no cover
fig, ax = plt.subplots(1, 1)
fig.canvas.set_window_title('Lines Detected in Lamp')
ax.axhline(median, color='b')
ax.plot(range(len(data)),
data,
color='k',
label='Lamp Data')
for line in lines:
ax.axvline(line + 1,
color='k',
linestyle=':',
label='First Detected Center')
for center in new_center:
ax.axvline(center,
color='k',
                       linestyle='-.',
label='New Center')
plt.show()
return new_center
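# Note on the math above: the centroid is the intensity-weighted mean
# position, centroid = sum(x_i * I_i) / sum(I_i), evaluated over a window
# symmetric around the detected center. A hypothetical call would be:
#
#     detected = [402, 775, 1203]                 # integer pixel positions
#     refined = recenter_lines(lamp_ccd.data, detected)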
def record_trace_information(ccd, trace_info):
"""Adds trace information to fits header
Notes:
Example of trace_info.
OrderedDict([('GSP_TMOD', ['Polynomial1D', 'Model name used to fit trace']),
('GSP_TORD', [2, 'Degree of the model used to fit target trace']),
('GSP_TC00', [80.92244303468138, 'Parameter c0']),
('GSP_TC01', [0.0018921968204536187, 'Parameter c1']),
('GSP_TC02', [-7.232545448865748e-07, 'Parameter c2']),
('GSP_TERR', [0.18741058188097284, 'RMS error of target trace'])])
Args:
ccd (CCDData): ccdproc.CCDData instance to have trace info recorded into its
header.
trace_info (OrderedDict): Ordered Dictionary with a set of fits keywords
associated to a list of values corresponding to value and comment.
Returns:
ccd (CCDData): Same ccdproc.CCDData instance with the header modified.
"""
last_keyword = None
for info_key in trace_info:
info_value, info_comment = trace_info[info_key]
log.debug(
"Adding trace information: "
"{:s} = {:s} / {:s}".format(info_key,
str(info_value),
info_comment))
if last_keyword is None:
ccd.header.set(info_key,
value=info_value,
comment=info_comment)
last_keyword = info_key
else:
ccd.header.set(info_key,
value=info_value,
comment=info_comment,
after=last_keyword)
last_keyword = info_key
return ccd
def save_extracted(ccd, destination, prefix='e', target_number=1):
"""Save extracted spectrum while adding a prefix.
    Args:
        ccd (CCDData): :class:`~astropy.nddata.CCDData` instance
        destination (str): Path where the file will be saved.
        prefix (str): Prefix to be added to images. Default `e`.
        target_number (int): Sequential number of the spectroscopic target.
    Returns:
        :class:`~astropy.nddata.CCDData` instance of the image just recorded,
        although it is not really necessary.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert os.path.isdir(destination)
file_name = ccd.header['GSP_FNAM']
if target_number > 0:
new_suffix = '_target_{:d}.fits'.format(target_number)
file_name = re.sub('.fits', new_suffix, file_name)
if ccd.header['OBSTYPE'] in ['COMP', 'ARC']:
extraction_region = re.sub(':','-', ccd.header['GSP_EXTR'])
file_name = re.sub('.fits', '_{:s}.fits'.format(extraction_region),
file_name)
new_file_name = prefix + file_name
else:
new_file_name = prefix + file_name
log.info("Saving uncalibrated(w) extracted spectrum to file: "
"{:s}".format(new_file_name))
full_path = os.path.join(destination, new_file_name)
ccd = write_fits(ccd=ccd, full_path=full_path, parent_file=file_name)
return ccd
def search_comp_group(object_group, comp_groups, reference_data):
"""Search for a suitable comparison lamp group
In case a science target was observed without comparison lamps, usually
right before or right after, this function will look for a compatible set
obtained at a different time or pointing.
Notes:
This methodology is not recommended for radial velocity studies.
Args:
        object_group (DataFrame): A :class:`~pandas.DataFrame` instance
            containing a group of images for a given scientific target.
comp_groups (list): A list in which every element is a
:class:`~pandas.DataFrame`
that contains information regarding groups of comparison lamps.
reference_data (ReferenceData): Instance of
`goodman.pipeline.core.ReferenceData` contains all information
related to the reference lamp library.
    Returns:
        comp_group (DataFrame): A :class:`~pandas.DataFrame` with a
            compatible group of comparison lamps.
    Raises:
        NoMatchFound: If no compatible comparison lamp group is found.
    """
log.debug('Finding a suitable comparison lamp group')
object_confs = object_group.groupby(['grating',
'cam_targ',
'grt_targ',
'filter',
'filter2']
).size().reset_index()
# .rename(columns={0: 'count'})
for comp_group in comp_groups:
if ((comp_group['grating'] == object_confs.iloc[0]['grating']) &
(comp_group['cam_targ'] == object_confs.iloc[0]['cam_targ']) &
(comp_group['grt_targ'] == object_confs.iloc[0]['grt_targ']) &
(comp_group['filter'] == object_confs.iloc[0]['filter']) &
(comp_group['filter2'] == object_confs.iloc[0]['filter2']
)).all():
if reference_data.check_comp_group(comp_group) is not None:
log.debug('Found a matching comparison lamp group')
return comp_group
raise NoMatchFound
def setup_logging(debug=False, generic=False): # pragma: no cover
"""configures logging
Notes:
Logging file name is set to default 'goodman_log.txt'.
If --debug is activated then the format of the message is different.
"""
log_filename = 'goodman_log.txt'
if '--debug' in sys.argv or debug:
log_format = '[%(asctime)s][%(levelname)8s]: %(message)s ' \
'[%(module)s.%(funcName)s:%(lineno)d]'
logging_level = logging.DEBUG
else:
log_format = '[%(asctime)s][%(levelname).1s]: %(message)s'
logging_level = logging.INFO
date_format = '%H:%M:%S'
formatter = logging.Formatter(fmt=log_format,
datefmt=date_format)
logging.basicConfig(level=logging_level,
format=log_format,
datefmt=date_format)
log = logging.getLogger(__name__)
file_handler = logging.FileHandler(filename=log_filename)
file_handler.setFormatter(fmt=formatter)
file_handler.setLevel(level=logging_level)
log.addHandler(file_handler)
if not generic:
log.info("Starting Goodman HTS Pipeline Log")
log.info("Local Time : {:}".format(
datetime.datetime.now()))
log.info("Universal Time: {:}".format(
datetime.datetime.utcnow()))
try:
latest_release = check_version.get_last()
if "dev" in __version__:
log.warning("Running Development version: {:s}".format(__version__))
log.info("Latest Release: {:s}".format(latest_release))
elif check_version.am_i_updated(__version__):
if __version__ == latest_release:
log.info("Pipeline Version: {:s} (latest)".format(__version__))
else:
log.warning("Current Version: {:s}".format(__version__))
log.info("Latest Release: {:s}".format(latest_release))
else:
log.warning("Current Version '{:s}' is outdated.".format(
__version__))
log.info("Latest Release: {:s}".format(latest_release))
except ConnectionRefusedError:
log.error('Unauthorized GitHub API Access reached maximum')
log.info("Current Version: {:s}".format(__version__))
def trace(ccd,
model,
trace_model,
model_fitter,
sampling_step,
nfwhm=1,
plots=False):
"""Find the trace of a spectrum
This function is called by the `trace_targets` function, the difference is
that it only takes single models only not `CompoundModels` so this function
is called for every single target. `CompoundModels` are a bit tricky when
you need each model separated so all `CompoundModels` have been removed.
    Notes:
        This method forces the trace to stay within a rectangular region
        centered on `model.mean.value` with a width of `2 * nfwhm * fwhm`;
        this allows the tracing of low SNR targets. The assumption is valid
        since the spectra are always well aligned to the detector's pixel
        columns (dispersion axis).
    Args:
        ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance, 2D image.
model (Model): An astropy.modeling.Model instance that contains
information regarding the target to be traced.
trace_model (object): An astropy.modeling.Model instance, usually a low
order polynomial.
model_fitter (Fitter): An astropy.modeling.fitting.Fitter instance. Will
fit the sampled points to construct the trace model
sampling_step (int): Step for sampling the spectrum.
nfwhm (int): Number of fwhm to each side of the mean to be used for
searching the trace.
plots (bool): Toggles debugging plot
Returns:
An `astropy.modeling.Model` instance, that defines the trace of the
spectrum.
"""
assert isinstance(ccd, ccdproc.CCDData)
assert isinstance(model, Model)
assert isinstance(trace_model, Model)
spatial_length, dispersion_length = ccd.data.shape
sampling_axis = range(0, dispersion_length, sampling_step)
sample_values = []
if model.__class__.name == 'Gaussian1D':
model_fwhm = model.fwhm
model_mean = model.mean.value
elif model.__class__.name == 'Moffat1D':
model_fwhm = model.fwhm
model_mean = model.x_0.value
else:
raise NotImplementedError
sample_center = float(model_mean)
lower_limit_list = []
upper_limit_list = []
lower_limit = None
upper_limit = None
for point in sampling_axis:
lower_limit = np.max([0, int(sample_center - nfwhm * model_fwhm)])
upper_limit = np.min([int(sample_center + nfwhm * model_fwhm),
spatial_length])
lower_limit_list.append(lower_limit)
upper_limit_list.append(upper_limit)
sample = ccd.data[lower_limit:upper_limit, point:point + sampling_step]
sample_median = np.median(sample, axis=1)
try:
sample_peak = np.argmax(sample_median)
except ValueError: # pragma: no cover
log.error('Nfwhm {}'.format(nfwhm))
log.error('Model Stddev {}'.format(model_fwhm))
log.error('sample_center {}'.format(sample_center))
log.error('sample {}'.format(sample))
log.error('sample_median {}'.format(sample_median))
log.error('lower_limit {}'.format(lower_limit))
log.error('upper_limit {}'.format(upper_limit))
log.error('point {}'.format(point))
log.error('point + sampling_step {}'.format(point + sampling_step))
log.error("Spatial length: {}, Dispersion length {}".format(
spatial_length,
dispersion_length))
sys.exit()
sample_values.append(sample_peak + lower_limit)
if np.abs(sample_peak + lower_limit - model_mean)\
< nfwhm * model_fwhm:
sample_center = int(sample_peak + lower_limit)
else:
sample_center = float(model_mean)
trace_model.c2.fixed = True
fitted_trace = model_fitter(trace_model, sampling_axis, sample_values)
sampling_differences = [
(fitted_trace(sampling_axis[i]) - sample_values[i]) ** 2
for i in range(len(sampling_axis))]
rms_error = np.sqrt(
np.sum(np.array(sampling_differences))/len(sampling_differences))
log.debug("RMS Error of unclipped trace differences {:.3f}".format(
rms_error))
clipped_values = sigma_clip(sampling_differences,
sigma=2,
maxiters=3,
cenfunc=np.ma.median)
if np.ma.is_masked(clipped_values):
_sampling_axis = list(sampling_axis)
_sample_values = list(sample_values)
sampling_axis = []
sample_values = []
for i in range(len(clipped_values)):
if clipped_values[i] is not np.ma.masked:
sampling_axis.append(_sampling_axis[i])
sample_values.append(_sample_values[i])
log.debug("Re-fitting the trace for a better trace.")
trace_model.c2.fixed = False
fitted_trace = model_fitter(trace_model, sampling_axis, sample_values)
sampling_differences = [
(fitted_trace(sampling_axis[i]) - sample_values[i]) ** 2 for i in
range(len(sampling_axis))]
rms_error = np.sqrt(
np.sum(np.array(sampling_differences)) / len(sampling_differences))
log.debug(
"RMS Error after sigma-clipping trace differences {:.3f}".format(
rms_error))
trace_info = collections.OrderedDict()
trace_info['GSP_TMOD'] = [fitted_trace.__class__.__name__,
'Model name used to fit trace']
trace_info['GSP_TORD'] = [fitted_trace.degree,
'Degree of the model used to fit target trace']
for i in range(fitted_trace.degree + 1):
trace_info['GSP_TC{:02d}'.format(i)] = [
fitted_trace.__getattribute__('c{:d}'.format(i)).value,
'Parameter c{:d}'.format(i)]
trace_info['GSP_TERR'] = [rms_error, 'RMS error of target trace']
log.info("Target tracing RMS error: {:.3f}".format(rms_error))
if plots: # pragma: no cover
z1 = np.mean(ccd.data) - 0.5 * np.std(ccd.data)
z2 = np.median(ccd.data) + np.std(ccd.data)
fig, ax = plt.subplots()
fig.canvas.set_window_title(ccd.header['GSP_FNAM'])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title("Tracing information\n{:s}\n"
"RMS Error {:.2f}".format(ccd.header['GSP_FNAM'],
rms_error))
ax.imshow(ccd.data, clim=(z1, z2), cmap='gray')
ax.plot(sampling_axis,
sample_values,
color='b',
marker='o',
alpha=0.4,
label='Sampling points')
sampling_axis_limits = range(0, dispersion_length, sampling_step)
low_span = fitted_trace(sampling_axis_limits) - (fitted_trace(sampling_axis_limits) - np.mean(lower_limit_list))
up_span = fitted_trace(sampling_axis_limits) + (np.mean(upper_limit_list) - fitted_trace(sampling_axis_limits))
ax.fill_between(sampling_axis_limits,
low_span,
up_span,
where=up_span > low_span,
facecolor='g',
interpolate=True,
alpha=0.3,
                        label='Approximate extraction window')
ax.plot(fitted_trace(range(dispersion_length)),
color='r',
linestyle='--',
label='Fitted Trace Model')
# plt.plot(model(range(spatial_length)))
ax.legend(loc='best')
plt.tight_layout()
if plt.isinteractive():
plt.draw()
plt.pause(2)
else:
plt.show()
return fitted_trace, trace_info
def trace_targets(ccd, target_list, sampling_step=5, pol_deg=2, nfwhm=5,
plots=False):
"""Find the trace of the target's spectrum on the image
    This function defines a low order polynomial that traces the location of
    the spectrum. The arguments pol_deg and sampling_step define the
    polynomial degree and the spacing in pixels between samples. For every
    sample a gaussian model is fitted and the center (mean) is recorded;
    since spectrum traces vary smoothly this value is used as the new center
    for the base model used to fit the spectrum profile.
    Notes:
        This doesn't work for extended sources. Also, this calls the function
        `trace` to do the actual tracing; the difference is that this method
        operates at a higher level.
    Args:
        ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData`
        target_list (list): List of single target profiles.
        sampling_step (int): Frequency of sampling in pixels
        pol_deg (int): Polynomial degree for fitting the trace
        plots (bool): If True will show plots (debugging)
        nfwhm (int): Number of fwhm from the spatial profile center used to
            search for the target. Default 5.
Returns:
all_traces (list): List that contains traces that are
astropy.modeling.Model instance
"""
# added two assert for debugging purposes
assert isinstance(ccd, ccdproc.CCDData)
assert all([isinstance(profile, Model) for profile in target_list])
# Initialize model fitter
model_fitter = fitting.LevMarLSQFitter()
# Initialize the model to fit the traces
trace_model = models.Polynomial1D(degree=pol_deg)
# List that will contain all the Model instances corresponding to traced
# targets
all_traces = []
for profile in target_list:
single_trace, trace_info = trace(ccd=ccd,
model=profile,
trace_model=trace_model,
model_fitter=model_fitter,
sampling_step=sampling_step,
nfwhm=nfwhm,
plots=plots)
if 0 < single_trace.c0.value < ccd.shape[0]:
log.debug('Adding trace to list')
all_traces.append([single_trace, profile, trace_info])
else:
log.error("Unable to trace target.")
log.error('Trace is out of boundaries. Center: '
'{:.4f}'.format(single_trace.c0.value))
if plots: # pragma: no cover
z1 = np.mean(ccd.data) - 0.5 * np.std(ccd.data)
z2 = np.median(ccd.data) + np.std(ccd.data)
fig, ax = plt.subplots()
fig.canvas.set_window_title(ccd.header['GSP_FNAM'])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax.set_title("Trace(s) for {:s}".format(ccd.header['GSP_FNAM']))
ax.imshow(ccd.data, clim=(z1, z2), cmap='gray')
ax.plot([], color='r', label='Trace(s)')
for strace, prof, trace_info in all_traces:
ax.plot(strace(range(ccd.data.shape[1])), color='r')
ax.legend(loc='best')
plt.tight_layout()
plt.show()
return all_traces
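# Hypothetical usage sketch; `identify_targets` stands for the pipeline step
# assumed to provide the fitted spatial profiles (Gaussian1D or Moffat1D).
#
#     profiles = identify_targets(ccd)
#     traces = trace_targets(ccd, profiles, sampling_step=5, pol_deg=2)
#     for trace_model, profile, info in traces:
#         print(info['GSP_TERR'][0])              # RMS error of each trace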
def validate_ccd_region(ccd_region, regexp=r'^\[\d*:\d*,\d*:\d*\]$'):
compiled_reg_exp = re.compile(regexp)
if not compiled_reg_exp.match(ccd_region):
raise SyntaxError("ccd regions must be defined in the format "
"'[x1:x2,y1:y2]'")
else:
return True
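# Example behaviour (illustrative):
#
#     validate_ccd_region('[10:100,20:200]')      # returns True
#     validate_ccd_region('10:100,20:200')        # raises SyntaxError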
def write_fits(ccd,
full_path,
combined=False,
parent_file=None,
overwrite=True):
"""Write fits while adding information to the header.
    This is a wrapper that allows saving files while adding information to
    the header. It exists mostly for historical reasons.
Args:
        ccd (CCDData): A :class:`~astropy.nddata.CCDData` instance to be
            saved to fits.
full_path (str): Full path of file.
combined (bool): True if `ccd` is the result of combining images.
parent_file (str): Name of the file from which ccd originated. If
combined is True this will be set to `combined`.
overwrite (bool): Overwrite files, default True.
Returns:
:class:`~astropy.nddata.CCDData` instance.
"""
assert isinstance(ccd, ccdproc.CCDData)
if os.path.isabs(full_path) and not os.path.isdir(os.path.dirname(full_path)):
log.error("Directory {} does not exist. Creating it right now."
"".format(os.path.dirname(full_path)))
os.mkdir(os.path.dirname(full_path))
# Original File Name
# This should be set only once.
if combined:
ccd.header.set('GSP_ONAM',
value=os.path.basename(full_path))
ccd.header.set('GSP_PNAM',
value='combined')
# Parent File Name
if not combined and parent_file is not None:
ccd.header.set('GSP_PNAM',
value=os.path.basename(parent_file))
# Current File Name
ccd.header.set('GSP_FNAM', value=os.path.basename(full_path))
ccd.header.set('GSP_PATH', value=os.path.dirname(full_path))
# write to file
log.info("Saving FITS file to {:s}".format(os.path.basename(full_path)))
ccd.write(full_path, overwrite=overwrite)
assert os.path.isfile(full_path)
return ccd
# classes definition
class GenerateDcrParFile(object):
"""Creates dcr.par file based on lookup table
`dcr` parameters depend heavily on binning, this class generates a file
using the default format. The lookup table considers camera and binning.
"""
_format = [
"THRESH = {:.1f} // Threshold (in STDDEV)",
"XRAD = {:d} // x-radius of the box (size = 2 * radius)",
"YRAD = {:d} // y-radius of the box (size = 2 * radius)",
"NPASS = {:d} // Maximum number of cleaning passes",
"DIAXIS = {:d} // Dispersion axis: 0 - no dispersion, 1 - X, 2 - Y",
"LRAD = {:d} // Lower radius of region for replacement statistics",
"URAD = {:d} // Upper radius of region for replacement statistics",
"GRAD = {:d} // Growing radius",
"VERBOSE = {:d} // Verbose level [0,1,2]",
"END"]
_columns = ['parameter',
'red-1',
'red-2',
'red-3',
'blue-1',
'blue-2',
'blue-3']
_lookup = [
['thresh', 3.0, 4.0, 3.0, 3.0, 3.0, 3.0],
['xrad', 9, 7, 9, 8, 9, 9],
['yrad', 9, 9, 9, 8, 9, 9],
['npass', 5, 5, 5, 5, 5, 5],
['diaxis', 0, 0, 0, 0, 0, 0],
['lrad', 1, 1, 1, 1, 1, 1],
['urad', 3, 3, 3, 3, 3, 3],
['grad', 1, 0, 1, 1, 1, 1],
['verbose', 1, 1, 1, 1, 1, 1]
]
def __init__(self, par_file_name='dcr.par'):
"""
Args:
par_file_name:
"""
self._file_name = par_file_name
self._df = pandas.DataFrame(self._lookup, columns=self._columns)
self._binning = "{:s}-{:s}"
self._data_format = "\n".join(self._format)
def __call__(self, instrument='Red', binning='1', path='default'):
"""
Args:
instrument (str): Instrument from INSTCONF keyword
binning (str): Serial (dispersion) Binning from the header.
path (str): Directory where to save the file.
"""
assert any([instrument == option for option in ['Red', 'Blue']])
b = self._binning.format(instrument.lower(), binning)
self._data_format = self._data_format.format(
self._df[b][self._df.parameter == 'thresh'].values[0],
int(self._df[b][self._df.parameter == 'xrad'].values[0]),
int(self._df[b][self._df.parameter == 'yrad'].values[0]),
int(self._df[b][self._df.parameter == 'npass'].values[0]),
int(self._df[b][self._df.parameter == 'diaxis'].values[0]),
int(self._df[b][self._df.parameter == 'lrad'].values[0]),
int(self._df[b][self._df.parameter == 'urad'].values[0]),
int(self._df[b][self._df.parameter == 'grad'].values[0]),
int(self._df[b][self._df.parameter == 'verbose'].values[0]))
self._create_file(path=path)
def _create_file(self, path):
"""Creates `dcr.par` file
Args:
path (str): Path to where to save the `dcr.par` file.
"""
if os.path.isdir(path):
full_path = os.path.join(path, self._file_name)
else:
full_path = os.path.join(os.getcwd(), self._file_name)
with open(full_path, 'w') as dcr_par:
dcr_par.write(self._data_format)
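# Hypothetical usage sketch for GenerateDcrParFile (paths and values chosen
# for illustration):
#
#     generate_dcr_par = GenerateDcrParFile()
#     generate_dcr_par(instrument='Red', binning='1', path='/path/to/reduced')
#     # writes a dcr.par file using the red camera, serial binning 1 column
#     # of the lookup table above.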
class NightDataContainer(object):
"""This class is designed to be the organized data container. It doesn't
store image data but a list of :class:`~pandas.DataFrame` objects. Also it
stores critical variables such as sunrise and sunset times.
"""
def __init__(self, path, instrument, technique):
"""Initializes all the variables for the class
Args:
path (str): Full path to the directory where raw data is located
instrument (str): `Red` or `Blue` stating whether the data was taken
using the Red or Blue Goodman Camera.
technique (str): `Spectroscopy` or `Imaging` stating what kind of
data was taken.
"""
self.full_path = path
self.instrument = instrument
self.technique = technique
self.gain = None
self.rdnoise = None
self.roi = None
self.is_empty = True
"""For imaging use"""
self.bias = None
self.day_flats = None
self.dome_flats = None
self.sky_flats = None
self.data_groups = None
"""For spectroscopy use"""
# comp_groups will store :class:`~pandas.DataFrame` (groups) that
# contain only OBSTYPE == COMP, they should be requested only when
        # needed, e.g. for the science case in which every science target is
        # observed with comparison lamps and quartz lamps (if any).
self.comp_groups = None
# object_groups will store :class:`~pandas.DataFrame` (groups) with only
# OBSTYPE == OBJECT this is the case when the observer takes comparison
# lamps only at the beginning or end of the night.
self.object_groups = None
# spec_groups will store :class:`~pandas.DataFrame` (groups) with a set
# of OBJECT and COMP, this is usually the case for radial velocity
# studies.
self.spec_groups = None
"""Time reference points"""
self.sun_set_time = None
self.sun_rise_time = None
self.evening_twilight = None
self.morning_twilight = None
def __repr__(self):
"""Produces a nice summary of the information contained"""
if self.is_empty:
return str("Empty Data Container")
else:
class_info = str("{:s}\n"
"Full Path: {:s}\n"
"Instrument: {:s}\n"
"Technique: {:s}".format(str(self.__class__),
self.full_path,
self.instrument,
self.technique))
if all([self.gain, self.rdnoise, self.roi]):
class_info += str("\nGain: {:.2f}\n"
"Readout Noise: {:.2f}\n"
"ROI: {:s}".format(self.gain,
self.rdnoise,
self.roi))
class_info += str("\nIs Empty: {:s}\n".format(str(self.is_empty)))
group_info = "\nData Grouping Information\n"
group_info += "BIAS Group:\n"
group_info += self._get_group_repr(self.bias)
group_info += "Day FLATs Group:\n"
group_info += self._get_group_repr(self.day_flats)
group_info += "Dome FLATs Group:\n"
group_info += self._get_group_repr(self.dome_flats)
group_info += "Sky FLATs Group:\n"
group_info += self._get_group_repr(self.sky_flats)
if self.technique == 'Spectroscopy':
group_info += "COMP Group:\n"
group_info += self._get_group_repr(self.comp_groups)
group_info += "OBJECT Group\n"
group_info += self._get_group_repr(self.object_groups)
group_info += "OBJECT + COMP Group:\n"
group_info += self._get_group_repr(self.spec_groups)
# group_info += self._get_group_repr(self.data_groups)
class_info += group_info
elif self.technique == 'Imaging':
group_info += "DATA Group:\n"
group_info += self._get_group_repr(self.data_groups)
class_info += group_info
return class_info
@staticmethod
def _get_group_repr(group):
"""Converts the file names in each group to string
This class has a __repr__ method and in this method the file names
contained in the different groups gets formatted as a string for
displaying in a readable way.
"""
group_str = ""
if group is not None:
for i in range(len(group)):
if len(group) == 1:
group_str += "Files in Group\n"
else:
group_str += "Files in Group {:d}\n".format(i + 1)
for _file in group[i]['file']:
group_str += " {:s}\n".format(_file)
return group_str
else:
return " Group is Empty\n"
def add_bias(self, bias_group):
"""Adds a bias group
Args:
bias_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if len(bias_group) < 2:
if self.technique == 'Imaging':
log.error('Imaging mode needs BIAS to work properly. '
'Go find some.')
else:
log.warning('BIAS are needed for optimal results.')
else:
if self.bias is None:
self.bias = [bias_group]
else:
self.bias.append(bias_group)
if self.bias is not None:
self.is_empty = False
def add_day_flats(self, day_flats):
""""Adds a daytime flat group
Args:
day_flats (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.day_flats is None:
self.day_flats = [day_flats]
else:
self.day_flats.append(day_flats)
if self.day_flats is not None:
self.is_empty = False
def add_data_group(self, data_group):
"""Adds a data group
Args:
data_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.data_groups is None:
self.data_groups = [data_group]
else:
self.data_groups.append(data_group)
if self.data_groups is not None:
self.is_empty = False
def add_comp_group(self, comp_group):
"""Adds a comp-only group
All comparison lamps groups are added here. The ones that may have been
taken in the afternoon (isolated) or along science target. This will
act as a pool of comparison lamp groups for eventual science targets
taken without comparison lamps.
Args:
comp_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.comp_groups is None:
self.comp_groups = [comp_group]
else:
self.comp_groups.append(comp_group)
if self.comp_groups is not None:
self.is_empty = False
def add_object_group(self, object_group):
"""Adds a object-only group
Args:
object_group (DataFrame): A :class:`~pandas.DataFrame` Contains a
set of keyword values of grouped image metadata
"""
if self.object_groups is None:
self.object_groups = [object_group]
else:
self.object_groups.append(object_group)
if self.object_groups is not None:
self.is_empty = False
def add_spec_group(self, spec_group):
"""Adds a data group containing object and comp
The comparison lamp groups are also added to a general pool of
comparison lamps.
Args:
spec_group (DataFrame): A :class:`~pandas.DataFrame` Contains a set
of keyword values of grouped image metadata
"""
if self.spec_groups is None:
self.spec_groups = [spec_group]
else:
self.spec_groups.append(spec_group)
if self.spec_groups is not None:
self.is_empty = False
comp_group = spec_group[spec_group.obstype == 'COMP']
self.add_comp_group(comp_group=comp_group)
def set_sun_times(self, sun_set, sun_rise):
"""Sets values for sunset and sunrise
Args:
sun_set (str): Sun set time in the format 'YYYY-MM-DDTHH:MM:SS.SS'
            sun_rise (str): Sun rise time in the format 'YYYY-MM-DDTHH:MM:SS.SS'
"""
self.sun_set_time = sun_set
self.sun_rise_time = sun_rise
def set_twilight_times(self, evening, morning):
"""Sets values for evening and morning twilight
Args:
evening (str): Evening twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
morning (str): Morning twilight time in the format
'YYYY-MM-DDTHH:MM:SS.SS'
"""
self.evening_twilight = evening
self.morning_twilight = morning
def set_readout(self, gain, rdnoise, roi):
"""Set Gain, Read noise and ROI.
Args:
gain (float): Gain from header
rdnoise (float): Read noise from header.
roi (str): ROI from header.
"""
self.gain = gain
self.rdnoise = rdnoise
self.roi = roi
class NoMatchFound(Exception): # pragma: no cover
"""Exception for when no match is found."""
def __init__(self, message="No match found"):
Exception.__init__(self, message)
class NoTargetException(Exception): # pragma: no cover
"""Exception to be raised when no target is identified"""
def __init__(self):
Exception.__init__(self, 'No targets identified.')
class NotEnoughLinesDetected(Exception): # pragma: no cover
"""Exception for when there are no lines detected."""
def __init__(self):
Exception.__init__(self, 'Not enough lines detected.')
class ReferenceData(object):
"""Contains spectroscopic reference lines values and filename to templates.
This class stores:
- file names for reference fits spectrum
- file names for CSV tables with reference lines and relative
intensities
- line positions only for the elements used in SOAR comparison lamps
"""
def __init__(self, reference_dir):
"""Init method for the ReferenceData class
        This method uses ccdproc.ImageFileCollection on the `reference_dir`
        to capture all possible reference lamps. The reference lamps have a
        list of detected lines registered in the header: the pixel positions
        are stored as GSP_P??? keywords, where ??? is a number from 001 to
        999, and the corresponding angstrom values are stored in keywords of
        the form GSP_A???.
Args:
reference_dir (str): full path to the reference data directory
"""
self.log = logging.getLogger(__name__)
self.reference_dir = reference_dir
reference_collection = ccdproc.ImageFileCollection(self.reference_dir)
self.ref_lamp_collection = reference_collection.summary.to_pandas()
self.lines_pixel = None
self.lines_angstrom = None
self._ccd = None
self.nist = {}
self.lamp_status_keywords = [
'LAMP_HGA',
'LAMP_NE',
'LAMP_AR',
'LAMP_FE',
'LAMP_CU',
'LAMP_QUA',
'LAMP_QPE',
'LAMP_BUL',
'LAMP_DOM',
'LAMP_DPE']
def get_reference_lamp(self, header):
"""Finds a suitable template lamp from the catalog
Args:
            header (Header): FITS header of the image for which we are
                looking for a reference lamp.
Returns:
full path to best matching reference lamp.
"""
if all([keyword in [hkey for hkey in header.keys()] for keyword in self.lamp_status_keywords]):
self.log.info("Searching matching reference lamp")
filtered_collection = self.ref_lamp_collection[(
(self.ref_lamp_collection['lamp_hga'] == header['LAMP_HGA']) &
(self.ref_lamp_collection['lamp_ne'] == header['LAMP_NE']) &
(self.ref_lamp_collection['lamp_ar'] == header['LAMP_AR']) &
(self.ref_lamp_collection['lamp_fe'] == header['LAMP_FE']) &
(self.ref_lamp_collection['lamp_cu'] == header['LAMP_CU']) &
(self.ref_lamp_collection['wavmode'] == header['wavmode']))]
if filtered_collection.empty:
error_message = "Unable to find a match for: "\
"LAMP_HGA = {}, "\
"LAMP_NE = {}, "\
"LAMP_AR = {}, "\
"LAMP_FE = {}, "\
"LAMP_CU = {}, "\
"WAVMODE = {} ".format(header['LAMP_HGA'],
header['LAMP_NE'],
header['LAMP_AR'],
header['LAMP_FE'],
header['LAMP_CU'],
header['WAVMODE'])
self.log.error(error_message)
raise NoMatchFound(error_message)
else:
filtered_collection = self.ref_lamp_collection[
(self.ref_lamp_collection['object'] == header['object']) &
# TODO (simon): Wavemode can be custom (GRT_TARG, CAM_TARG, GRATING)
(self.ref_lamp_collection['wavmode'] == re.sub(' ', '_', header['wavmode']).upper())]
if filtered_collection.empty:
error_message = "Unable to find matching "\
"reference lamp for: "\
"OBJECT = {}, "\
"WAVMODE = {}".format(header['OBJECT'],
header['WAVMODE'])
self.log.error(error_message)
raise NoMatchFound(error_message)
if len(filtered_collection) == 1:
self.log.info(
"Reference Lamp Found: {:s}"
"".format("".join(filtered_collection.file.to_string(index=False).split())))
full_path = os.path.join(self.reference_dir,
"".join(filtered_collection.file.to_string(
index=False).split()))
self._ccd = ccdproc.CCDData.read(full_path, unit=u.adu)
self._recover_lines()
return self._ccd
else:
raise NotImplementedError(
"Found {} matches".format(len(filtered_collection)))
def lamp_exists(self, header):
"""Checks whether a matching lamp exist or not
Args:
object_name (str): Name of the lamp from 'OBJECT' keyword.
grating (str): Grating from 'GRATING' keyword.
grt_targ (float): Grating target from keyword 'GRT_TARG'.
cam_targ (float): Camera target from keyword 'CAM_TARG'.
Returns:
True of False depending if a single matching lamp exist.
Raises:
NotImplementedError if there are more than one lamp found.
"""
filtered_collection = self.ref_lamp_collection[
(self.ref_lamp_collection['lamp_hga'] == header['LAMP_HGA']) &
(self.ref_lamp_collection['lamp_ne'] == header['LAMP_NE']) &
(self.ref_lamp_collection['lamp_ar'] == header['LAMP_AR']) &
(self.ref_lamp_collection['lamp_cu'] == header['LAMP_CU']) &
(self.ref_lamp_collection['lamp_fe'] == header['LAMP_FE']) &
(self.ref_lamp_collection['grating'] == header['GRATING']) &
(self.ref_lamp_collection['grt_targ'] == header['GRT_TARG']) &
(self.ref_lamp_collection['cam_targ'] == header['CAM_TARG'])]
if filtered_collection.empty:
return False
elif len(filtered_collection) == 1:
return True
else:
raise NotImplementedError
def check_comp_group(self, comp_group):
"""Check if comparison lamp group has matching reference lamps
Args:
comp_group (DataFrame): A :class:`~pandas.DataFrame` instance that
contains meta-data for a group of comparison lamps.
        Returns:
            The subset of `comp_group` that has a matching reference lamp,
            the full `comp_group` if none of its lamps have reference lamps,
            or None if the group is empty.
        """
lamps = comp_group.groupby(['grating',
'grt_targ',
'cam_targ',
'lamp_hga',
'lamp_ne',
'lamp_ar',
'lamp_fe',
'lamp_cu']).size().reset_index(
).rename(columns={0: 'count'})
        # given the way the input is created this loop should run only once,
        # but it has been left in place in case that assumption breaks.
for i in lamps.index:
pseudo_header = fits.Header()
# pseudo_header.set('OBJECT', value=lamps.iloc[i]['object'])
pseudo_header.set('GRATING', value=lamps.iloc[i]['grating'])
pseudo_header.set('GRT_TARG', value=lamps.iloc[i]['grt_targ'])
pseudo_header.set('CAM_TARG', value=lamps.iloc[i]['cam_targ'])
pseudo_header.set('LAMP_HGA', value=lamps.iloc[i]['lamp_hga'])
pseudo_header.set('LAMP_NE', value=lamps.iloc[i]['lamp_ne'])
pseudo_header.set('LAMP_AR', value=lamps.iloc[i]['lamp_ar'])
pseudo_header.set('LAMP_FE', value=lamps.iloc[i]['lamp_fe'])
pseudo_header.set('LAMP_CU', value=lamps.iloc[i]['lamp_cu'])
if self.lamp_exists(header=pseudo_header):
new_group = comp_group[
(comp_group['grating'] == lamps.iloc[i]['grating']) &
(comp_group['grt_targ'] == lamps.iloc[i]['grt_targ']) &
(comp_group['cam_targ'] == lamps.iloc[i]['cam_targ']) &
(comp_group['lamp_hga'] == lamps.iloc[i]['lamp_hga']) &
(comp_group['lamp_ne'] == lamps.iloc[i]['lamp_ne']) &
(comp_group['lamp_ar'] == lamps.iloc[i]['lamp_ar']) &
(comp_group['lamp_fe'] == lamps.iloc[i]['lamp_fe']) &
(comp_group['lamp_cu'] == lamps.iloc[i]['lamp_cu'])]
return new_group
else:
self.log.warning("The target's comparison lamps do not have "
"reference lamps.")
self.log.debug("In this case a compatible lamp will be "
"obtained from all the lamps obtained in the "
"data or present in the files.")
self.log.debug("Using the full set of comparison lamps "
"for extraction.")
return comp_group
return None
def _recover_lines(self):
"""Read lines from the reference lamp's header."""
self.log.info("Recovering line information from reference Lamp.")
self.lines_pixel = []
self.lines_angstrom = []
pixel_keys = self._ccd.header['GSP_P*']
for pixel_key in pixel_keys:
if re.match(r'GSP_P\d{3}', pixel_key) is not None:
angstrom_key = re.sub('GSP_P', 'GSP_A', pixel_key)
assert pixel_key[-3:] == angstrom_key[-3:]
assert angstrom_key in self._ccd.header
if int(float(self._ccd.header[angstrom_key])) != 0:
self.lines_pixel.append(float(self._ccd.header[pixel_key]))
self.lines_angstrom.append(
float(self._ccd.header[angstrom_key]))
else:
self.log.debug(
"File: {:s}".format(self._ccd.header['GSP_FNAM']))
self.log.debug(
"Ignoring keywords: {:s}={:f}, {:s}={:f}".format(
pixel_key,
self._ccd.header[pixel_key],
angstrom_key,
float(self._ccd.header[angstrom_key])))
@staticmethod
def _order_validation(lines_array):
"""Checks that the array of lines only increases."""
previous = None
for line_value in lines_array:
if previous is not None:
try:
assert line_value > previous
previous = line_value
except AssertionError:
log.error("Error: Line {:f} is not larger "
"than {:f}".format(line_value, previous))
return False
else:
previous = line_value
return True
def _load_nist_list(self, **kwargs):
"""Load all csv files from strong lines in NIST."""
nist_path = kwargs.get(
'path',
os.path.join(os.path.dirname(
sys.modules['goodman_pipeline'].__file__),
'data/nist_list'))
assert os.path.isdir(nist_path)
nist_files = glob.glob(os.path.join(nist_path, "*.txt"))
for nist_file in nist_files:
key = os.path.basename(nist_file)[22:-4]
nist_data = pandas.read_csv(nist_file, names=['intensity',
'air_wavelength',
'spectrum',
'reference'])
self.nist[key] = nist_data
class SaturationValues(object):
"""Contains a complete table of readout modes and 50% half well
"""
def __init__(self, ccd=None):
"""Defines a :class:`~pandas.DataFrame` with saturation_threshold information
Both, Red and Blue cameras have tabulated saturation_threshold values depending
on the readout configurations. It defines a :class:`~pandas.DataFrame`
object.
Notes:
For the purposes of this documentation *50% full well* is the same
as ``saturation_threshold level`` though they are not the same thing.
Args:
ccd (CCDData): Image to be tested for saturation_threshold
"""
self.log = logging.getLogger(__name__)
self.__saturation = None
columns = ['camera',
'read_rate',
'analog_attn',
'gain',
'read_noise',
'half_full_well',
'saturates_before']
saturation_table = [['Blue', 50, 0, 0.25, 3.33, 279600, True],
['Blue', 50, 2, 0.47, 3.35, 148723, True],
['Blue', 50, 3, 0.91, 3.41, 76813, True],
['Blue', 100, 0, 0.56, 3.69, 124821, True],
['Blue', 100, 2, 1.06, 3.72, 65943, True],
['Blue', 100, 3, 2.06, 3.99, 33932, False],
['Blue', 200, 0, 1.4, 4.74, 49928, False],
['Blue', 200, 2, 2.67, 5.12, 26179, False],
['Blue', 400, 0, 5.67, 8.62, 12328, False],
['Red', 100, 3, 1.54, 3.45, 66558, True],
['Red', 100, 2, 3.48, 5.88, 29454, False],
['Red', 344, 3, 1.48, 3.89, 69257, True],
['Red', 344, 0, 3.87, 7.05, 26486, False],
['Red', 750, 2, 1.47, 5.27, 69728, True],
['Red', 750, 2, 1.45, 5.27, 69728, True],
['Red', 750, 0, 3.77, 8.99, 27188, False],
['Red', 750, 0, 3.78, 8.99, 27188, False]]
self._sdf = pandas.DataFrame(saturation_table,
columns=columns)
if ccd is not None:
self.get_saturation_value(ccd=ccd)
@property
def saturation_value(self):
"""Saturation value in counts
In fact the value it returns is the 50% of full potential well,
Some configurations reach digital saturation_threshold before 50% of full
potential well, they are specified in the last column:
``saturates_before``.
Returns:
None if the value has not been defined
"""
if self.__saturation is None:
self.log.error('Saturation value not set')
return None
else:
return self.__saturation
def get_saturation_value(self, ccd):
"""Defines the saturation_threshold level
Args:
ccd (CCDData): Image to be tested for saturation_threshold
Returns:
The saturation_threshold value or None
"""
hfw = self._sdf.half_full_well[
(self._sdf.camera == ccd.header['INSTCONF']) &
(self._sdf.gain == ccd.header['GAIN']) &
(self._sdf.read_noise == ccd.header['RDNOISE'])]
if hfw.empty:
self.log.critical('Unable to obtain saturation_threshold level')
self.__saturation = None
return None
else:
self.__saturation = float("".join(hfw.to_string(index=False).split()))
self.log.debug("Set saturation_threshold level as {:.0f}".format(
self.__saturation))
return self.__saturation
class SpectroscopicMode(object):
def __init__(self):
"""Init method for the Spectroscopic Mode
This method defines a :class:`~pandas.DataFrame` instance that contains
all the current standard wavelength modes for Goodman HTS.
"""
self.log = logging.getLogger(__name__)
columns = ['grating_freq', 'wavmode', 'camtarg', 'grttarg', 'ob_filter']
spec_mode = [['400', 'm1', '11.6', '5.8', 'None'],
['400', 'm2', '16.1', '7.5', 'GG455'],
['600', 'UV', '15.25', '7.0', 'None'],
['600', 'Blue', '17.0', '7.0', 'None'],
['600', 'Mid', '20.0', '10.0', 'GG385'],
['600', 'Red', '27.0', '12.0', 'GG495'],
['930', 'm1', '20.6', '10.3', 'None'],
['930', 'm2', '25.2', '12.6', 'None'],
['930', 'm3', '29.9', '15.0', 'GG385'],
['930', 'm4', '34.6', '18.3', 'GG495'],
['930', 'm5', '39.4', '19.7', 'GG495'],
['930', 'm6', '44.2', '22.1', 'OG570'],
['1200', 'm0', '26.0', '16.3', 'None'],
['1200', 'm1', '29.5', '16.3', 'None'],
['1200', 'm2', '34.4', '18.7', 'None'],
['1200', 'm3', '39.4', '20.2', 'None'],
['1200', 'm4', '44.4', '22.2', 'GG455'],
['1200', 'm5', '49.6', '24.8', 'GG455'],
['1200', 'm6', '54.8', '27.4', 'GG495'],
['1200', 'm7', '60.2', '30.1', 'OG570'],
['1800', 'Custom', 'None', 'None', 'None'],
['2100', 'Custom', 'None', 'None', 'None'],
['2400', 'Custom', 'None', 'None', 'None']
]
        self.modes_data_frame = pandas.DataFrame(spec_mode, columns=columns)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import SpectralCoclustering  # sklearn.cluster.bicluster was removed in newer scikit-learn releases
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, ColumnDataSource
from itertools import product
######## practice pt1
x = pd.Series([6, 3, 8, 6], index=["q", "w", "e", "r"])
# print(x.index)
# print(x)
x = x.reindex(sorted(x.index))
# print(x.index)
# print(x)
y = pd.Series([7, 3, 5, 2], index=["e", "q", "r", "t"])
# print(x + y)
######## practice pt2
data = {'name': ['Tim', 'Jim', 'Pam', 'Sam'],
'age': [29, 31, 27, 35],
'ZIP': ['02115', '02130', '67700', '00100']}
y = pd.DataFrame(data, columns=["name", "age", "ZIP"])
# print(y.name)
######## whisky preparations and plotting
whisky = pd.read_csv("whiskies.txt")
whisky["Region"] = pd.read_csv("regions.txt")
# print("\n\nWhisky head\n", whisky.head())
# print("\nWhisky tail\n", whisky.tail())
# print(whisky.iloc[0:10])
# print(whisky.iloc[5:10, 0:5])
# print(whisky.columns)
flavors = whisky.iloc[:, 2:14]
corr_flavors = pd.DataFrame.corr(flavors)
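# A possible next step (sketch only; using 6 clusters to match the number of
# whisky regions is an assumption here): co-cluster the distillery-by-
# distillery correlation matrix with the imported SpectralCoclustering.
#
#     corr_whisky = pd.DataFrame.corr(flavors.transpose())
#     model = SpectralCoclustering(n_clusters=6, random_state=0)
#     model.fit(corr_whisky)
#     whisky['Group'] = pd.Series(model.row_labels_, index=whisky.index)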
import glob
import os
import sys
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import logging
from ftplib import FTP
import urllib
import numpy as np
import pandas as pd
from zipfile import ZipFile
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'dis_017_storm_events_us' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory.
Two types of files are being used: details and locations.
Using bulk download via FTP, link to NOAA's storm events database:
https://www.ncdc.noaa.gov/stormevents/ftp.jsp
'''
# connect to the FTP server and login anonymously
ftp = FTP('ftp.ncdc.noaa.gov', timeout = 30)
ftp.login()
# navigate to the correct directory and get a list of all filenames
ftp.cwd('/pub/data/swdi/stormevents/csvfiles/')
filenames = ftp.nlst()
# retrieve a sorted list of the details files
details_files = []
for filename in filenames:
if not filename.startswith('StormEvents_details-ftp_v1.0_d'):
continue
details_files.append(filename)
details_files.sort()
# retrieve a sorted list of the locations files
locations_files = []
for filename in filenames:
if not filename.startswith('StormEvents_locations-ftp_v1.0_d'):
continue
locations_files.append(filename)
locations_files.sort()
def ftp_download(file_dir):
'''
download data
INPUT file_dir: ftp location of file to download (string)
'''
for filename in file_dir:
with open(os.path.join(data_dir, filename), 'wb') as fo:
ftp.retrbinary("RETR " + filename, fo.write)
# download data from the source FTP
ftp_download(details_files)
ftp_download(locations_files)
'''
Process data
'''
#Concatenating details and locations files
raw_data_file = glob.glob(os.path.join(data_dir, "*.gz")) # advisable to use os.path.join as this makes concatenation OS independent
details_list = []
locations_list = []
# go through each file, turn it into a dataframe, and append that df to one of two lists, based on if it
# is a details file or a locations file
for file in raw_data_file:
if file.startswith('data/StormEvents_details-ftp_v1.0_d'):
        df = pd.read_csv(file)
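# The rest of this loop is truncated here. Based on the comment above, a
# plausible continuation (a sketch, not the original code) would be:
#
#     for file in raw_data_file:
#         if 'StormEvents_details' in os.path.basename(file):
#             details_list.append(pd.read_csv(file))
#         elif 'StormEvents_locations' in os.path.basename(file):
#             locations_list.append(pd.read_csv(file))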
import numpy as np
import gegenbauer
import compute_NTK_spectrum
import matplotlib.pyplot as plt
import approx_learning_curves
import csv
import numba
from numba import jit
from numba import prange
import time
import pandas as pd
import argparse
def SGD(X, Y, Theta, r, num_iter, readout_only=False):
P = X.shape[0]
d = X.shape[1]
M = Theta.shape[0]
deltaTheta = Theta.copy()
batch_size = 40
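# NOTE: the readout weights are re-initialised to zero below, so the r argument
# passed into SGD is effectively ignored (kept as in the original code)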
r = np.zeros(M)
m_r = np.zeros(M)
v_r = np.zeros(M)
beta_1 = 0.9
beta_2 = 0.999
m_Theta = np.zeros((M,d))
v_Theta = np.zeros((M,d))
for t in range(num_iter):
#r_grad = np.zeros(M)
#Theta_grad = np.zeros((M,d))
r_grad = np.zeros((batch_size, M))
Theta_grad = np.zeros((batch_size, M, d))
if t % 500==0:
print("SGD epoch = %d" % t)
Z0 = Theta @ X.T
Z = np.maximum(Z0, np.zeros(Z0.shape))
E_tr = 1/P * np.linalg.norm(Z.T @ r - Y)**2
print("Etr = %e" % E_tr)
if E_tr < 1e-16:
break
g = np.zeros(M)
g_Theta = np.zeros((M,d))
# batch wise computation
inds = np.random.randint(0,P, batch_size)
x_t = X[inds, :]
y_t = Y[inds]
Z = Theta @ x_t.T
A = np.maximum(Z, np.zeros((M, batch_size)))
Deriv = np.heaviside(Z, np.zeros((M, batch_size)))
f_t = A.T @ r # batchsize
g = A @ (f_t - y_t)
r_deriv = np.outer(r, np.ones(batch_size)) * Deriv
f_y_x = x_t * np.outer(f_t - y_t, np.ones(d))
g_Theta = r_deriv @ f_y_x
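# Adam-style update: keep exponential moving averages of the gradient (m_*) and of the
# squared gradient (v_*); note the bias correction below uses a fixed (1 - beta) factor
# rather than the usual time-dependent (1 - beta**t) term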
m_r = beta_1*m_r + (1-beta_1)*g
v_r = beta_2*v_r + (1-beta_2)*(g**2)
m_hat = m_r / (1-beta_1)
v_hat = v_r / (1-beta_2)
m_Theta = beta_1 * m_Theta + (1-beta_1) * g_Theta
v_Theta = beta_2 * v_Theta + (1-beta_2) * g_Theta**2
m_Theta_hat = m_Theta / (1-beta_1)
v_Theta_hat = v_Theta / (1-beta_2)
delta_r = - 1e-3 / d * m_hat / (np.sqrt(v_hat) + 1e-8*np.ones(M))
delta_Theta = - 1e-3 /d * m_Theta_hat / (np.sqrt(v_Theta_hat) + 1e-8 *np.ones((M,d)))
r += delta_r
Theta += delta_Theta
Z0 = Theta @ X.T
Z = np.maximum(Z0, np.zeros(Z0.shape))
E_tr = 1/P * np.linalg.norm(Z.T @ r - Y)**2
return Theta, r, E_tr
def sample_random_points(num_pts, d):
R = np.random.multivariate_normal(np.zeros(d), np.eye(d), num_pts)
R = R* np.outer( np.linalg.norm(R, axis=1)**(-1), np.ones(d) )
return R
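# Usage sketch (illustrative values): each sampled point is normalised onto the unit sphere
# >>> R = sample_random_points(5, 3)
# >>> np.allclose(np.linalg.norm(R, axis=1), 1.0)
# True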
@jit(nopython=True, parallel=True)
def sample_random_points_jit(num_pts, d, R):
for i in prange(R.shape[0]):
for j in prange(R.shape[1]):
R[i,j] = np.random.standard_normal()
for i in prange(R.shape[0]):
R[i,:] = R[i,:] * (np.linalg.norm(R[i,:]) + 1e-10)**(-1)
return R
@jit(nopython = True)
def feedfoward(X, Theta, r):
Z0 = Theta @ X.T
Z = np.maximum(Z0, np.zeros(Z0.shape))
return Z.T @ r
def compute_kernel(X, Xp, spectrum, d, kmax):
P = X.shape[0]
Pp = Xp.shape[0]
gram = X @ Xp.T
gram = np.reshape(gram, P*Pp)
#Q = gegenbauer.get_gegenbauer(gram, kmax, d)
Q = gegenbauer.get_gegenbauer_fast2(gram, kmax, d)
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
K = Q.T @ (spectrum * degens)
#K = Q.T @ spectrum
K = np.reshape(K, (P,Pp))
return K
def get_gegenbauer_gram(Theta1, Theta2):
gram = Theta1 @ Theta2.T
M = Theta1.shape[0]
perc = 0
Q = gegenbauer.get_gegenbauer_fast2(np.reshape(gram, M**2), kmax, d)
return Q
#@jit(nopython=True)
def get_mode_errs(Theta, Theta_teach, r, r_teach, kmax, d, degens):
M = Theta.shape[0]
Q_ss = get_gegenbauer_gram(Theta, Theta)
Q_st = get_gegenbauer_gram(Theta, Theta_teach)
Q_tt = get_gegenbauer_gram(Theta_teach, Theta_teach)
mode_errs=np.zeros(kmax)
for k in range(kmax):
Q_ssk = np.reshape(Q_ss[k,:], (M,M))
Q_stk = np.reshape(Q_st[k,:], (M,M))
Q_ttk = np.reshape(Q_tt[k,:], (M,M))
mode_errs[k] = spectrum[k] * degens[k] * ( r.T @ Q_ssk @ r - 2*r.T @ Q_stk @ r_teach + r_teach.T @ Q_ttk @ r_teach )
return mode_errs
#@jit(nopython=True, parallel=True)
def generalization_expt(P, spectrum, M, d, kmax, num_repeats, Theta_teach, r_teach, num_test=1000):
all_mode_errs = np.zeros((num_repeats, kmax))
all_mc_errs = np.zeros(num_repeats)
all_training_errs = np.zeros(num_repeats)
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
print("P = %d" % P)
Theta = np.zeros((M,d))
#Theta_teach = np.zeros((M,d))
X_test = np.zeros((num_test, d))
X = np.zeros((P, d))
for t in range(num_repeats):
print("t=%d" %t)
start = time.time()
Theta = sample_random_points_jit(M, d, Theta)
r = np.random.standard_normal(M) / np.sqrt(M)
X = sample_random_points_jit(P, d, X)
Z_teach = np.maximum(Theta_teach @ X.T, np.zeros((M,P)) )
Y = Z_teach.T @ r_teach
num_iter = min(200*P, 40000)
Theta, r, E_tr = SGD(X, Y, Theta, r, num_iter, readout_only=False)
print("final Etr = %e" % E_tr)
counter = 1
print("finished SGD")
print("num tries: %d" % counter)
all_mode_errs[t,:] = get_mode_errs(Theta,Theta_teach, r, r_teach, kmax, d, degens)
end = time.time()
print("time = %lf" %(end -start))
X_test = sample_random_points_jit(num_test, d, X_test)
#X_test = sample_random_points(num_test, d)
Y_test = feedfoward(X_test, Theta_teach, r_teach)
Y_pred = feedfoward(X_test, Theta, r)
all_mc_errs[t] = 1/num_test * np.linalg.norm(Y_test-Y_pred)**2
all_training_errs[t] = E_tr
average_mode_errs = np.mean(all_mode_errs, axis = 0)
std_errs = np.std(all_mode_errs, axis=0)
average_mc =np.mean(all_mc_errs)
std_mc = np.std(all_mc_errs)
print("average MC = %e" % average_mc)
print("sum of modes = %e" % np.sum(average_mode_errs))
return average_mc, std_mc, np.mean(all_training_errs)
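# NOTE: the compute_kernel definition below is an identical re-definition of the
# function defined earlier in this file; Python silently keeps the later one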
def compute_kernel(X, Xp, spectrum, d, kmax):
P = X.shape[0]
Pp = Xp.shape[0]
gram = X @ Xp.T
gram = np.reshape(gram, P*Pp)
#Q = gegenbauer.get_gegenbauer(gram, kmax, d)
Q = gegenbauer.get_gegenbauer_fast2(gram, kmax, d)
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
K = Q.T @ (spectrum * degens)
#K = Q.T @ spectrum
K = np.reshape(K, (P,Pp))
return K
def generalization_expt_kteach(P, spectrum, M, d, kmax, num_repeats, X_teach, alpha_teach, spectrum_teach, num_test=1000):
all_mode_errs = np.zeros((num_repeats, kmax))
all_mc_errs = np.zeros(num_repeats)
all_training_errs = np.zeros(num_repeats)
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
print("P = %d" % P)
Theta = np.zeros((M,d))
X_test = np.zeros((num_test, d))
X = np.zeros((P, d))
for t in range(num_repeats):
print("t=%d" %t)
start = time.time()
Theta = sample_random_points_jit(M, d, Theta)
r = np.random.standard_normal(M) / np.sqrt(M)
X = sample_random_points_jit(P, d, X)
K = compute_kernel(X_teach, X, spectrum_teach, d, kmax)
Y = K.T @ alpha_teach
num_iter = 3*P
Theta, r, E_tr = SGD(X, Y, Theta, r, num_iter, readout_only=False)
print("Etr = %e" % E_tr)
counter = 1
print("finished SGD")
print("num tries: %d" % counter)
all_mode_errs[t,:] = get_mode_errs(Theta,Theta_teach, r, r_teach, kmax, d, degens)
end = time.time()
print("time = %lf" %(end -start))
X_test = sample_random_points_jit(num_test, d, X_test)
#X_test = sample_random_points(num_test, d)
Y_test = feedfoward(X_test, Theta_teach, r_teach)
Y_pred = feedfoward(X_test, Theta, r)
all_mc_errs[t] = 1/num_test * np.linalg.norm(Y_test-Y_pred)**2
all_training_errs[t] = E_tr
average_mode_errs = np.mean(all_mode_errs, axis = 0)
std_errs = np.std(all_mode_errs, axis=0)
average_mc =np.mean(all_mc_errs)
std_mc = np.std(all_mc_errs)
print("average MC = %e" % average_mc)
print("sum of modes = %e" % np.sum(average_mode_errs))
return average_mc, std_mc, np.mean(all_training_errs)
parser = argparse.ArgumentParser()
parser.add_argument('--input_dim', type=int, default= 30,
help='data input dimension')
parser.add_argument('--M', type=int,
help='number of hidden units', default = 500)
args = parser.parse_args()
d = args.input_dim
M = args.M
kmax = 25
P_vals = [10,20,50,100,250,500]
num_repeats = 10
# calculate spectrum of teacher
spectrum = gegenbauer.calculate_activation_coeffs(kmax, d)**2
degens = np.array( [gegenbauer.degeneracy(d,k) for k in range(kmax)] )
# fix get effective spectrum for higher d
theory_spectrum = compute_NTK_spectrum.get_effective_spectrum([1], kmax, d, ker = 'NTK')[0,:]
theory_spectrum_hermite = compute_NTK_spectrum.get_effective_spectrum_hermite([2], kmax, d, ker='NTK')[0,:]
theory_spectrum_NNGP = compute_NTK_spectrum.get_effective_spectrum([1], kmax, d, ker = 'NNGP')[0,:]
theory_g_sqr, p = approx_learning_curves.simulate_uc(theory_spectrum, degens, lamb = 1e-10)
theory_g_sqr_NNGP, p = approx_learning_curves.simulate_uc(theory_spectrum_NNGP, degens, lamb = 1e-10)
theory_g_sqr_hermite, p = approx_learning_curves.simulate_uc(theory_spectrum_hermite, degens, lamb = 1e-8)
theory_gen = np.zeros(theory_g_sqr.shape)
theory_gen_NNGP = np.zeros(theory_g_sqr.shape)
theory_gen_hermite = np.zeros(theory_g_sqr.shape)
for k in range(kmax):
if spectrum[k] !=0:
theory_gen[:,k] = theory_g_sqr[:,k] / theory_spectrum[k]**2 * spectrum[k]
theory_gen_NNGP[:,k] = theory_g_sqr_NNGP[:,k] / theory_spectrum_NNGP[k]**2 * spectrum[k]
theory_gen_hermite[:,k] = theory_g_sqr_hermite[:,k] / theory_spectrum[k]**2 * spectrum[k]
#theory_gen[:,k] = theory_g_sqr[:,k] / spectrum[k] * M
colors = ['b','r','g', 'm', 'c']
kplot = [0,1,2,4,6]
mc_errs = np.zeros(len(P_vals))
std_mc_errs = np.zeros(len(P_vals))
training_errs = np.zeros(len(P_vals))
Theta_teach = sample_random_points(M, d)
r_teach = np.random.standard_normal(M) / np.sqrt(M)
for i in range(len(P_vals)):
P = P_vals[i]
av_mc, std_mc, E_tr = generalization_expt(P, spectrum, M, d, kmax, num_repeats, Theta_teach, r_teach)
mc_errs[i] = av_mc
std_mc_errs[i] = std_mc
training_errs[i] = E_tr
plt.rcParams.update({'font.size': 12})
plt.loglog(P_vals, training_errs)
plt.xlabel('P')
plt.ylabel(r'$E_{tr}$')
plt.savefig('train_errs.pdf')
plt.show()
colors = ['b','r','g', 'm', 'c']
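# NOTE: mode_errs and std_errs are not defined at this scope (the per-mode errors are
# computed inside generalization_expt but not returned); kept as in the original code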
mode_df = pd.DataFrame(mode_errs)
std_df = pd.DataFrame(std_errs)
training_df = | pd.DataFrame(training_errs) | pandas.DataFrame |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import FloatingArray
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
@pytest.fixture
def left_array():
return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
@pytest.fixture
def right_array():
return | pd.array([True, False, None] * 3, dtype="boolean") | pandas.array |
#!/usr/bin/python
'''
This file holds the functions necessary to process the data behind the scenes
'''
import config
import json
import pandas as pd
import requests
import spotipy
from datetime import datetime
from flask import Flask
from flask import request
from numpy import nan
from spotipy.oauth2 import SpotifyClientCredentials
app = Flask(__name__)
app.config.from_object('config')
spotify_client_id = config.spotify_client_id.decode('utf-8')
spotify_client_secret = config.spotify_client_secret.decode('utf-8')
seatgeek_client_id = config.seatgeek_client_id
def process_daterange(daterange):
'''
Process daterange from datepicker input.
Args:
daterange (str): string with daterange formatted as
'YYYY-MM-DD to YYYY-MM-DD'
'''
daterange = daterange.strip()
if "to" in daterange:
date1, date2 = daterange.split(" to ")
else:
date1, date2 = (daterange, None)
return date1, date2
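# Usage sketch (illustrative dates):
# >>> process_daterange("2021-05-01 to 2021-05-03")
# ('2021-05-01', '2021-05-03')
# >>> process_daterange("2021-05-01")
# ('2021-05-01', None)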
def get_concert_information(zipcode, date1, date2, dist=3, per_page=100,
client_id=seatgeek_client_id):
'''
Fetch concert information from seatgeek
Args:
zipcode (str): zipcode to search near
date1 (str): min date of search
date2 (str): max date of search
dist (int): distance (mi) around zipcode for search (default: 3)
per_page (int): maximum number of results (default: 100)
client_id (str): seatgeek client id (default: from config.py)
'''
# Get dates in the format seatgeek expects
if not date2:
date2 = date1
datetime1 = f'{date1}T00:00:00'
datetime2 = f'{date2}T23:00:00'
# seatgeek API request
params = {"geoip": zipcode, "type": "concert",
"per_page": per_page, "range": f"{dist}mi",
"datetime_local.gte": datetime1, "datetime_local.lte": datetime2}
base_url = f"https://api.seatgeek.com/2/events?client_id={client_id}"
param_str = "&".join([f"{i}={v}" for i, v in params.items()])
response = requests.get(base_url + "&" + param_str)
data = response.json()
if response.status_code == 200:
# If there are no concerts raise exception
if len(data['events']) < 1:
raise NoConcertsFound
return data
else:
raise FailedApiRequestError
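# Usage sketch (requires a valid seatgeek client id; zipcode and dates are illustrative):
# >>> data = get_concert_information("02115", "2021-06-01", "2021-06-02")
# >>> [event['title'] for event in data['events']]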
def build_df_and_get_spotify_info(data):
'''
Take concert information from the seatgeek API, make a dataframe, and populate
it with Spotify artist and track information
Args:
data (dict): dictionary from seatgeek api response
Returns:
pandas.DataFrame: dataframe with concert and artist information in it
'''
# Use spotipy for its great support for large volume of requests
client_credentials_manager = SpotifyClientCredentials(client_id=spotify_client_id,
client_secret=spotify_client_secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# Pull select data fields and put into pandas dataframe
df_dict = {}
for event in data['events']:
for performer in event['performers']:
d = {}
d['performer'] = performer['short_name']
try:
d['genre'] = performer['genres'][0]['name']
except KeyError:
d['genre'] = "NA"
d['datetime_local'] = event['datetime_local']
dt = datetime.strptime(d['datetime_local'], "%Y-%m-%dT%H:%M:%S")
d['date_local'] = dt.strftime("%b %d %Y")
d['time_local'] = dt.strftime("%I:%M%p")
d['event_id'] = event['id']
d['event_title'] = event['title']
d['venue_name'] = event['venue']['name']
d['venue_id'] = event['venue']['id']
d['venue_address'] = f"{event['venue']['address']}, {event['venue']['extended_address']}" # noqa
# Spotify searching
spotify_artist_id, spotify_top_track_id = lookup_spotify_artist_track(sp, d['performer']) # noqa
d['spotify_artist_id'] = spotify_artist_id
d['spotify_top_track_id'] = spotify_top_track_id
# Performer information to dictionary
df_dict[performer['id']] = d
df = | pd.DataFrame(df_dict) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import numpy.linalg as LA
from scipy.sparse import csr_matrix
from sklearn.preprocessing import MinMaxScaler
def show_mtrx(m, title = None):
fig, ax = plt.subplots(figsize = (10, 5))
min_val = int(m.min())
max_val = int(m.max())
cax = ax.matshow(m, cmap=plt.cm.seismic)
fig.colorbar(cax, ticks=[min_val, int((min_val + max_val)/2), max_val])
plt.title(title)
plt.show()
def plot_results(MADs, MSEs):
f, axes = plt.subplots(1, 2, figsize=(10, 5))
if len(MADs) == 3:
mad = {"K": list(range(2, 2 + len(MADs[0]))), "xTx": MADs[0], "zTz": MADs[1], "rTr": MADs[2]}
else:
mad = {"K": list(range(2, 2 + len(MADs[0]))), "xTx": MADs[0], "zTz": MADs[1]}
df_mad = | pd.DataFrame(mad) | pandas.DataFrame |
import csv
import pandas as pd
import os
import numpy as np
BASE_DIR = os.getcwd()
def merge_dev_data(result_filename, file_pos, file_neg):
"""
Description: function that merges dev data from both
sentiments into a single data structure
Input:
-result_filename: str, name of the file to write the result to
-file_pos: str, name of file containing positive dev data
-file_neg: str, name of file containing negative dev data
"""
merged_data = []
with open(file_pos, errors="replace") as text:
txt = text.readlines()
merged_data += [(line, "positive") for line in txt]
text.close()
with open(file_neg, errors="replace") as text:
txt = text.readlines()
merged_data += [(line, "negative") for line in txt]
text.close()
df = pd.DataFrame(merged_data, columns=["text", "sentiment"])
df["text"] = df["text"].apply(lambda x: x.strip())
df = df.replace("", np.nan)
df = df[df["text"].notnull()]
df.to_csv(result_filename, index=False)
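# Usage sketch (file names are illustrative):
# >>> merge_dev_data("dev.csv", "dev_pos.txt", "dev_neg.txt")
# writes a two-column CSV with 'text' and 'sentiment' ('positive'/'negative') rows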
def merge_training_data(result_filename, original_dir, sentiment):
"""
Description: function that merges the training text files
for the positive and negative directories
Input:
-result_filename: str, name of the file that will contain
training data for the given sentiment
-original_dir: str, the directory containing the text files
-sentiment: str, the sentiment of the given text files
"""
df = | pd.DataFrame() | pandas.DataFrame |
from aridanalysis import aridanalysis as aa
import pytest
import pandas as pd
import numpy as np
import sklearn
from vega_datasets import data
import altair as alt
import statsmodels
# import warnings
import sys
import os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + "/../aridanalysis")
import error_strings as errors # noqaE402
@pytest.fixture
def simple_frame():
"""
Create a basic test dataframe for linear regression tests
"""
tdf = pd.DataFrame(
{
"x1": [1, 0, 0],
"x2": [0, 1.0, 0],
"x3": [0, 0, 1],
"x4": ["a", "a", "b"],
"y": [1, 3, -1.0],
}
)
return tdf
def test_arideda_return():
"""
Test return data type
"""
_, out = aa.arid_eda(
data.iris(), "species", "categorical", ["sepalLength", "sepalWidth"]
)
assert isinstance(out, alt.HConcatChart)
def test_arideda_features():
"""
Test calling with valid features list
"""
out, _ = aa.arid_eda(
data.iris(), "species", "categorical", ["sepalLength", "sepalWidth"]
)
assert isinstance(out, pd.core.frame.DataFrame)
def test_arideda_numfeature():
"""
Ensure data frame is appropriate size according to features
"""
features = ["sepalLength", "sepalWidth"]
out, _ = aa.arid_eda(data.iris(), "species", "categorical", features)
assert out.shape == (8, len(features))
def test_arideda_returns_tuple():
"""
Check that function returns two items
"""
assert (
len(
aa.arid_eda(
data.iris(),
"species",
"categorical",
["sepalLength", "sepalWidth"]
)
)
== 2
)
def test_arideda_empty_df():
"""
Test if an error occurs when the response type is not categorical or continuous
"""
with pytest.raises(AssertionError):
aa.arid_eda(
data.iris(),
"species",
"ORDINAL",
["sepalLength", "sepalWidth"])
def test_response_type_incorrect():
"""
Test if an error occurs when wrong response type is given
"""
with pytest.raises(AssertionError):
aa.arid_eda(
data.iris(),
"petalLength",
"categorical",
["sepalLength", "sepalWidth"]
)
def test_linreg_input_errors(simple_frame):
"""
Test linear regression input argument validation
"""
with pytest.raises(AssertionError, match=errors.INVALID_DATAFRAME):
aa.arid_linreg(6, "y")
with pytest.raises(AssertionError, match=errors.EMPTY_DATAFRAME):
aa.arid_linreg(pd.DataFrame(), "y")
with pytest.raises(AssertionError, match=errors.RESPONSE_NOT_FOUND):
aa.arid_linreg(simple_frame, "z")
with pytest.raises(AssertionError, match=errors.INVALID_RESPONSE_DATATYPE):
aa.arid_linreg(simple_frame, "x4")
with pytest.raises(AssertionError, match=errors.INVALID_REGULARIZATION_INPUT): # noqaE501
aa.arid_linreg(simple_frame, "y", regularization="L3")
with pytest.raises(AssertionError, match=errors.INVALID_ALPHA_INPUT):
aa.arid_linreg(simple_frame, "y", alpha="b")
def test_linreg_input_features(simple_frame):
"""
Test linear regression input feature arguments
"""
with pytest.raises(AssertionError, match=errors.NO_VALID_FEATURES):
aa.arid_linreg(simple_frame[["y"]], "y")
with pytest.raises(AssertionError, match=errors.NO_VALID_FEATURES):
aa.arid_linreg(simple_frame[["x4", "y"]], "y")
with pytest.raises(AssertionError, match=errors.NO_VALID_FEATURES):
aa.arid_linreg(simple_frame, "y", features=["b"])
with pytest.raises(AssertionError, match=errors.NO_VALID_FEATURES):
aa.arid_linreg(simple_frame, "y", features=["x4"])
with pytest.warns(UserWarning):
aa.arid_linreg(simple_frame, "y", features=["x1", "x2", "x3", "x4"])
with pytest.warns(UserWarning):
aa.arid_linreg(simple_frame, "y", features=["x1", "b"])
assert len((aa.arid_linreg(simple_frame, "y"))[0].coef_) == 3
assert (
len((aa.arid_linreg(simple_frame,
"y",
features=simple_frame.columns))[0].coef_)
== 3
)
assert (
len((aa.arid_linreg(simple_frame,
"y",
features=["x1", "x2", "x3"]))[0].coef_)
== 3
)
assert (
len(
(aa.arid_linreg(simple_frame,
"y",
features=["x1", "x2", "x3", "x4"]))[
0
].coef_
)
== 3
)
assert len((aa.arid_linreg(
simple_frame,
"y",
features=["x1"]))[0].coef_) == 1
assert len((aa.arid_linreg(
simple_frame,
"y",
features=["x1", "x2"]))[0].coef_) == 2
assert len((aa.arid_linreg(simple_frame, "y"))[1].params) == 3
assert (
len(
(aa.arid_linreg(
simple_frame,
"y",
features=simple_frame.columns))[1].params
)
== 3
)
assert (
len((aa.arid_linreg(
simple_frame,
"y",
features=["x1", "x2", "x3"]))[1].params)
== 3
)
assert (
len(
(aa.arid_linreg(
simple_frame,
"y",
features=["x1", "x2", "x3", "x4"]))[
1
].params
)
== 3
)
assert len((aa.arid_linreg(
simple_frame,
"y",
features=["x1"]))[1].params) == 1
assert (
len((aa.arid_linreg(
simple_frame,
"y",
features=["x1", "x2"]))[1].params) == 2
)
def test_linreg_model_types(simple_frame):
"""
Test linear regression output model types
"""
assert (
type((aa.arid_linreg(simple_frame, "y"))[0])
== sklearn.linear_model._base.LinearRegression
)
assert (
type((aa.arid_linreg(simple_frame, "y", regularization="L1"))[0])
== sklearn.linear_model._coordinate_descent.Lasso
)
assert (
type((aa.arid_linreg(simple_frame, "y", regularization="L2"))[0])
== sklearn.linear_model._ridge.Ridge
)
assert (
type((aa.arid_linreg(simple_frame, "y", regularization="L1L2"))[0])
== sklearn.linear_model._coordinate_descent.ElasticNet
)
assert (
type((aa.arid_linreg(simple_frame, "y"))[1])
== statsmodels.regression.linear_model.RegressionResultsWrapper
)
assert (
type((aa.arid_linreg(simple_frame, "y", regularization="L1"))[1])
== statsmodels.base.elastic_net.RegularizedResultsWrapper
)
assert (
type((aa.arid_linreg(simple_frame, "y", regularization="L2"))[1])
== statsmodels.base.elastic_net.RegularizedResults
)
assert (
type((aa.arid_linreg(simple_frame, "y", regularization="L1L2"))[1])
== statsmodels.base.elastic_net.RegularizedResultsWrapper
)
def test_linreg_model_coefficients(simple_frame):
"""
Test statsmodel & sklearn model coefficients match
"""
assert (
aa.arid_linreg(simple_frame, "y")[0].coef_.all()
== (aa.arid_linreg(simple_frame, "y")[1].params).to_numpy().all()
)
assert (
aa.arid_linreg(simple_frame, "y", regularization="L1")[0].coef_.all()
== (aa.arid_linreg(simple_frame, "y", regularization="L1")[1].params)
.to_numpy()
.all()
)
assert (
aa.arid_linreg(simple_frame, "y", regularization="L2")[0].coef_.all()
== (aa.arid_linreg(simple_frame, "y", regularization="L2")[1].params).all() # noqaE501
)
assert (
aa.arid_linreg(simple_frame, "y", regularization="L1L2")[0].coef_.all()
== (aa.arid_linreg(simple_frame, "y", regularization="L1L2")[1].params)
.to_numpy()
.all()
)
def test_linreg_model_predictions(simple_frame):
"""
Test statsmodel and sklearn model predictions match
"""
assert round(
aa.arid_linreg(simple_frame, "y")[0].predict(np.array([[1, 4, 3]]))[0], 3 # noqaE501
) == round(
(aa.arid_linreg(simple_frame, "y")[1].predict(np.array([[1, 4, 3]])))[0], 3 # noqaE501
)
assert round(
aa.arid_linreg(simple_frame, "y", regularization="L1")[0].predict(
np.array([[1, 4, 3]])
)[0],
3,
) == round(
(
aa.arid_linreg(simple_frame, "y", regularization="L1")[1].predict(
np.array([[1, 4, 3]])
)
)[0],
3,
)
assert round(
aa.arid_linreg(simple_frame, "y", regularization="L2")[0].predict(
np.array([[1, 4, 3]])
)[0],
3,
) == round(
(
aa.arid_linreg(simple_frame, "y", regularization="L2")[1].predict(
np.array([[1, 4, 3]])
)
)[0],
3,
)
assert round(
aa.arid_linreg(simple_frame, "y", regularization="L1L2")[0].predict(
np.array([[1, 4, 3]])
)[0],
3,
) == round(
aa.arid_linreg(simple_frame, "y", regularization="L1L2")[1].predict(
np.array([[1, 4, 3]])
)[0],
3,
)
@pytest.fixture
def log_df():
"""
Create a basic test dataframe for logistic regression tests
"""
data = [
[32, "male", 80, 0],
[26, "female", 65, 1],
[22, "female", 75, 1],
[36, "male", 85, 0],
[45, "male", 82, 1],
[18, "female", 57, 0],
[57, "male", 60, 1],
]
log_df = | pd.DataFrame(data, columns=["Age", "Sex", "Weight", "Target"]) | pandas.DataFrame |
#!/usr/bin/env python3
import argparse
import collections
import copy
import datetime
import functools
import glob
import json
import logging
import math
import operator
import os
import os.path
import re
import sys
import typing
import warnings
import matplotlib
import matplotlib.cm
import matplotlib.dates
import matplotlib.pyplot
import matplotlib.ticker
import networkx
import numpy
import pandas
import tabulate
import tqdm
import rows.console
import rows.load
import rows.location_finder
import rows.model.area
import rows.model.carer
import rows.model.datetime
import rows.model.historical_visit
import rows.model.history
import rows.model.json
import rows.model.location
import rows.model.metadata
import rows.model.past_visit
import rows.model.problem
import rows.model.rest
import rows.model.schedule
import rows.model.service_user
import rows.model.visit
import rows.parser
import rows.plot
import rows.routing_server
import rows.settings
import rows.sql_data_source
def handle_exception(exc_type, exc_value, exc_traceback):
"""Logs uncaught exceptions"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
else:
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
__COMMAND = 'command'
__PULL_COMMAND = 'pull'
__INFO_COMMAND = 'info'
__SHOW_WORKING_HOURS_COMMAND = 'show-working-hours'
__COMPARE_BOX_PLOTS_COMMAND = 'compare-box-plots'
__COMPARE_DISTANCE_COMMAND = 'compare-distance'
__COMPARE_WORKLOAD_COMMAND = 'compare-workload'
__COMPARE_QUALITY_COMMAND = 'compare-quality'
__COMPARE_COST_COMMAND = 'compare-cost'
__CONTRAST_WORKLOAD_COMMAND = 'contrast-workload'
__COMPARE_PREDICTION_ERROR_COMMAND = 'compare-prediction-error'
__COMPARE_BENCHMARK_COMMAND = 'compare-benchmark'
__COMPARE_BENCHMARK_TABLE_COMMAND = 'compare-benchmark-table'
__COMPARE_LITERATURE_TABLE_COMMAND = 'compare-literature-table'
__COMPARE_THIRD_STAGE_PLOT_COMMAND = 'compare-third-stage-plot'
__COMPARE_THIRD_STAGE_TABLE_COMMAND = 'compare-third-stage-table'
__COMPARE_THIRD_STAGE_SUMMARY_COMMAND = 'compare-third-stage-summary'
__COMPARE_QUALITY_OPTIMIZER_COMMAND = 'compare-quality-optimizer'
__COMPUTE_RISKINESS_COMMAND = 'compute-riskiness'
__COMPARE_DELAY_COMMAND = 'compare-delay'
__TYPE_ARG = 'type'
__ACTIVITY_TYPE = 'activity'
__VISITS_TYPE = 'visits'
__COMPARE_TRACE_COMMAND = 'compare-trace'
__CONTRAST_TRACE_COMMAND = 'contrast-trace'
__COST_FUNCTION_TYPE = 'cost_function'
__DEBUG_COMMAND = 'debug'
__AREA_ARG = 'area'
__FROM_ARG = 'from'
__TO_ARG = 'to'
__FILE_ARG = 'file'
__DATE_ARG = 'date'
__BASE_FILE_ARG = 'base-file'
__CANDIDATE_FILE_ARG = 'candidate-file'
__SOLUTION_FILE_ARG = 'solution'
__PROBLEM_FILE_ARG = 'problem'
__OUTPUT_PREFIX_ARG = 'output_prefix'
__OPTIONAL_ARG_PREFIX = '--'
__BASE_SCHEDULE_PATTERN = 'base_schedule_pattern'
__CANDIDATE_SCHEDULE_PATTERN = 'candidate_schedule_pattern'
__SCHEDULE_PATTERNS = 'schedule_patterns'
__LABELS = 'labels'
__OUTPUT = 'output'
__ARROWS = 'arrows'
__FILE_FORMAT_ARG = 'output_format'
__color_map = matplotlib.pyplot.get_cmap('tab20c')
FOREGROUND_COLOR = __color_map.colors[0]
FOREGROUND_COLOR2 = 'black'
def get_or_raise(obj, prop):
value = getattr(obj, prop)
if not value:
raise ValueError('{0} not set'.format(prop))
return value
def get_date_time(value):
date_time = datetime.datetime.strptime(value, '%Y-%m-%d')
return date_time
def get_date(value):
value_to_use = get_date_time(value)
return value_to_use.date()
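# Usage sketch:
# >>> get_date('2017-10-01')
# datetime.date(2017, 10, 1)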
def configure_parser():
parser = argparse.ArgumentParser(prog=sys.argv[0],
description='Robust Optimization '
'for Workforce Scheduling command line utility')
subparsers = parser.add_subparsers(dest=__COMMAND)
pull_parser = subparsers.add_parser(__PULL_COMMAND)
pull_parser.add_argument(__AREA_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FROM_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TO_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT_PREFIX_ARG)
info_parser = subparsers.add_parser(__INFO_COMMAND)
info_parser.add_argument(__FILE_ARG)
compare_distance_parser = subparsers.add_parser(__COMPARE_DISTANCE_COMMAND)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __PROBLEM_FILE_ARG, required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __SCHEDULE_PATTERNS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __LABELS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
compare_workload_parser = subparsers.add_parser(__COMPARE_WORKLOAD_COMMAND)
compare_workload_parser.add_argument(__PROBLEM_FILE_ARG)
compare_workload_parser.add_argument(__BASE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__CANDIDATE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
debug_parser = subparsers.add_parser(__DEBUG_COMMAND)
# debug_parser.add_argument(__PROBLEM_FILE_ARG)
# debug_parser.add_argument(__SOLUTION_FILE_ARG)
compare_trace_parser = subparsers.add_parser(__COMPARE_TRACE_COMMAND)
compare_trace_parser.add_argument(__PROBLEM_FILE_ARG)
compare_trace_parser.add_argument(__FILE_ARG)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __ARROWS, type=bool, default=False)
contrast_workload_parser = subparsers.add_parser(__CONTRAST_WORKLOAD_COMMAND)
contrast_workload_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_workload_parser.add_argument(__BASE_FILE_ARG)
contrast_workload_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TYPE_ARG)
compare_prediction_error_parser = subparsers.add_parser(__COMPARE_PREDICTION_ERROR_COMMAND)
compare_prediction_error_parser.add_argument(__BASE_FILE_ARG)
compare_prediction_error_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser = subparsers.add_parser(__CONTRAST_TRACE_COMMAND)
contrast_trace_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_trace_parser.add_argument(__BASE_FILE_ARG)
contrast_trace_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
show_working_hours_parser = subparsers.add_parser(__SHOW_WORKING_HOURS_COMMAND)
show_working_hours_parser.add_argument(__FILE_ARG)
show_working_hours_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_quality_parser = subparsers.add_parser(__COMPARE_QUALITY_COMMAND)
compare_quality_optimizer_parser = subparsers.add_parser(__COMPARE_QUALITY_OPTIMIZER_COMMAND)
compare_quality_optimizer_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_COST_COMMAND)
compare_benchmark_parser = subparsers.add_parser(__COMPARE_BENCHMARK_COMMAND)
compare_benchmark_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_LITERATURE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_BENCHMARK_TABLE_COMMAND)
subparsers.add_parser(__COMPUTE_RISKINESS_COMMAND)
subparsers.add_parser(__COMPARE_DELAY_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_PLOT_COMMAND)
compare_box_parser = subparsers.add_parser(__COMPARE_BOX_PLOTS_COMMAND)
compare_box_parser.add_argument(__PROBLEM_FILE_ARG)
compare_box_parser.add_argument(__BASE_FILE_ARG)
compare_box_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
third_stage_summary_parser = subparsers.add_parser(__COMPARE_THIRD_STAGE_SUMMARY_COMMAND)
third_stage_summary_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
return parser
def split_delta(delta: datetime.timedelta) -> typing.Tuple[int, int, int, int]:
days = int(delta.days)
hours = int((delta.total_seconds() - 24 * 3600 * days) // 3600)
minutes = int((delta.total_seconds() - 24 * 3600 * days - 3600 * hours) // 60)
seconds = int(delta.total_seconds() - 24 * 3600 * days - 3600 * hours - 60 * minutes)
assert hours < 24
assert minutes < 60
assert seconds < 60
return days, hours, minutes, seconds
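# Usage sketch:
# >>> split_delta(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4))
# (1, 2, 3, 4)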
def get_time_delta_label(total_travel_time: datetime.timedelta) -> str:
days, hours, minutes, seconds = split_delta(total_travel_time)
time = '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
if days == 0:
return time
elif days == 1:
return '1 day ' + time
else:
return '{0} days '.format(days) + time
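# Usage sketch:
# >>> get_time_delta_label(datetime.timedelta(days=2, hours=1, minutes=30))
# '2 days 01:30:00'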
def pull(args, settings):
area_code = get_or_raise(args, __AREA_ARG)
from_raw_date = get_or_raise(args, __FROM_ARG)
to_raw_date = get_or_raise(args, __TO_ARG)
output_prefix = get_or_raise(args, __OUTPUT_PREFIX_ARG)
console = rows.console.Console()
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
location_cache = rows.location_finder.FileSystemCache(settings)
location_finder = rows.location_finder.MultiModeLocationFinder(location_cache, user_tag_finder, timeout=5.0)
data_source = rows.sql_data_source.SqlDataSource(settings, console, location_finder)
from_date_time = get_date_time(from_raw_date)
to_date_time = get_date_time(to_raw_date)
current_date_time = from_date_time
while current_date_time <= to_date_time:
schedule = data_source.get_past_schedule(rows.model.area.Area(code=area_code), current_date_time.date())
for visit in schedule.visits:
visit.visit.address = None
output_file = '{0}_{1}.json'.format(output_prefix, current_date_time.date().strftime('%Y%m%d'))
with open(output_file, 'w') as output_stream:
json.dump(schedule, output_stream, cls=rows.model.json.JSONEncoder)
current_date_time += datetime.timedelta(days=1)
def get_travel_time(schedule, user_tag_finder):
routes = schedule.routes()
total_travel_time = datetime.timedelta()
with rows.plot.create_routing_session() as session:
for route in routes:
visit_it = iter(route.visits)
current_visit = next(visit_it, None)
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
while current_visit:
prev_location = current_location
current_visit = next(visit_it, None)
if not current_visit:
break
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
travel_time_sec = session.distance(prev_location, current_location)
if travel_time_sec:
total_travel_time += datetime.timedelta(seconds=travel_time_sec)
return total_travel_time
def info(args, settings):
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
schedule_file = get_or_raise(args, __FILE_ARG)
schedule_file_to_use = os.path.realpath(os.path.expandvars(schedule_file))
schedule = rows.load.load_schedule(schedule_file_to_use)
carers = {visit.carer for visit in schedule.visits}
print(get_travel_time(schedule, user_tag_finder), len(carers), len(schedule.visits))
def compare_distance(args, settings):
schedule_patterns = getattr(args, __SCHEDULE_PATTERNS)
labels = getattr(args, __LABELS)
output_file = getattr(args, __OUTPUT, 'distance')
output_file_format = getattr(args, __FILE_FORMAT_ARG)
data_frame_file = 'data_frame_cache.bin'
if os.path.isfile(data_frame_file):
data_frame = pandas.read_pickle(data_frame_file)
else:
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
store = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for label, schedule_pattern in zip(labels, schedule_patterns):
for schedule_path in glob.glob(schedule_pattern):
schedule = rows.load.load_schedule(schedule_path)
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(schedule)
frame = rows.plot.get_schedule_data_frame(schedule, problem, duration_estimator, distance_estimator)
visits = frame['Visits'].sum()
carers = len(frame.where(frame['Visits'] > 0))
idle_time = frame['Availability'] - frame['Travel'] - frame['Service']
idle_time[idle_time < pandas.Timedelta(0)] = pandas.Timedelta(0)
overtime = frame['Travel'] + frame['Service'] - frame['Availability']
overtime[overtime < pandas.Timedelta(0)] = pandas.Timedelta(0)
store.append({'Label': label,
'Date': schedule.metadata.begin,
'Availability': frame['Availability'].sum(),
'Travel': frame['Travel'].sum(),
'Service': frame['Service'].sum(),
'Idle': idle_time.sum(),
'Overtime': overtime.sum(),
'Carers': carers,
'Visits': visits})
data_frame = pandas.DataFrame(store)
data_frame.sort_values(by=['Date'], inplace=True)
data_frame.to_pickle(data_frame_file)
condensed_frame = pandas.pivot(data_frame, columns='Label', values='Travel', index='Date')
condensed_frame['Improvement'] = condensed_frame['2nd Stage'] - condensed_frame['3rd Stage']
condensed_frame['RelativeImprovement'] = condensed_frame['Improvement'] / condensed_frame['2nd Stage']
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, ax = matplotlib.pyplot.subplots(1, 1, sharex=True)
try:
width = 0.20
dates = data_frame['Date'].unique()
time_delta_convert = rows.plot.TimeDeltaConverter()
indices = numpy.arange(1, len(dates) + 1, 1)
handles = []
position = 0
for color_number, label in enumerate(labels):
data_frame_to_use = data_frame[data_frame['Label'] == label]
handle = ax.bar(indices + position * width,
time_delta_convert(data_frame_to_use['Travel']),
width,
color=color_map.colors[color_number],
bottom=time_delta_convert.zero)
handles.append(handle)
position += 1
ax.yaxis_date()
yaxis_converter = rows.plot.CumulativeHourMinuteConverter()
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(yaxis_converter))
ax.set_ylabel('Total Travel Time [hh:mm:ss]')
ax.set_yticks([time_delta_convert.zero + datetime.timedelta(seconds=seconds) for seconds in range(0, 30 * 3600, 4 * 3600 + 1)])
ax.set_xlabel('Day of October 2017')
translate_labels = {
'3rd Stage': '3rd Stage',
'Human Planners': 'Human Planners'
}
labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
rows.plot.add_legend(ax, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -0.25)) # , bbox_to_anchor=(0.5, -1.1)
figure.tight_layout()
figure.subplots_adjust(bottom=0.20)
rows.plot.save_figure(output_file, output_file_format)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
# figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(3, 1, sharex=True)
# try:
# width = 0.20
# dates = data_frame['Date'].unique()
# time_delta_convert = rows.plot.TimeDeltaConverter()
# indices = numpy.arange(1, len(dates) + 1, 1)
#
# handles = []
# position = 0
# for label in labels:
# data_frame_to_use = data_frame[data_frame['Label'] == label]
#
# handle = ax1.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Travel']),
# width,
# bottom=time_delta_convert.zero)
#
# ax2.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Idle']),
# width,
# bottom=time_delta_convert.zero)
#
# ax3.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Overtime']),
# width,
# bottom=time_delta_convert.zero)
#
# handles.append(handle)
# position += 1
#
# ax1.yaxis_date()
# ax1.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax1.set_ylabel('Travel Time')
#
# ax2.yaxis_date()
# ax2.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax2.set_ylabel('Idle Time')
#
# ax3.yaxis_date()
# ax3.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax3.set_ylabel('Total Overtime')
# ax3.set_xlabel('Day of October 2017')
#
# translate_labels = {
# '3rd Stage': 'Optimizer',
# 'Human Planners': 'Human Planners'
# }
# labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
#
# rows.plot.add_legend(ax3, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -1.1))
# figure.tight_layout()
# figure.subplots_adjust(bottom=0.20)
#
# rows.plot.save_figure(output_file, output_file_format)
# finally:
# matplotlib.pyplot.cla()
# matplotlib.pyplot.close(figure)
def calculate_forecast_visit_duration(problem):
forecast_visit_duration = rows.plot.VisitDict()
for recurring_visits in problem.visits:
for local_visit in recurring_visits.visits:
forecast_visit_duration[local_visit] = local_visit.duration
return forecast_visit_duration
def compare_workload(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
base_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __BASE_SCHEDULE_PATTERN))}
base_schedule_by_date = {schedule.metadata.begin: schedule for schedule in base_schedules}
candidate_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __CANDIDATE_SCHEDULE_PATTERN))}
candidate_schedule_by_date = {schedule.metadata.begin: schedule for schedule in candidate_schedules}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
output_file_format = getattr(args, __FILE_FORMAT_ARG)
dates = set(candidate_schedule_by_date.keys())
for date in base_schedule_by_date.keys():
dates.add(date)
dates = list(dates)
dates.sort()
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for date in dates:
base_schedule = base_schedule_by_date.get(date, None)
if not base_schedule:
logging.error('No base schedule is available for %s', date)
continue
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(base_schedule)
candidate_schedule = candidate_schedule_by_date.get(date, None)
if not candidate_schedule:
logging.error('No candidate schedule is available for %s', date)
continue
base_schedule_file = base_schedules[base_schedule]
base_schedule_data_frame = rows.plot.get_schedule_data_frame(base_schedule, problem, duration_estimator, distance_estimator)
base_schedule_stem, base_schedule_ext = os.path.splitext(os.path.basename(base_schedule_file))
rows.plot.save_workforce_histogram(base_schedule_data_frame, base_schedule_stem, output_file_format)
candidate_schedule_file = candidate_schedules[candidate_schedule]
candidate_schedule_data_frame = rows.plot.get_schedule_data_frame(candidate_schedule, problem, duration_estimator, distance_estimator)
candidate_schedule_stem, candidate_schedule_ext \
= os.path.splitext(os.path.basename(candidate_schedule_file))
rows.plot.save_workforce_histogram(candidate_schedule_data_frame,
candidate_schedule_stem,
output_file_format)
rows.plot.save_combined_histogram(candidate_schedule_data_frame,
base_schedule_data_frame,
['2nd Stage', '3rd Stage'],
'contrast_workforce_{0}_combined'.format(date),
output_file_format)
def contrast_workload(args, settings):
__WIDTH = 0.35
__FORMAT = 'svg'
plot_type = getattr(args, __TYPE_ARG, None)
if plot_type != __ACTIVITY_TYPE and plot_type != __VISITS_TYPE:
raise ValueError(
'Unknown plot type: {0}. Use either {1} or {2}.'.format(plot_type, __ACTIVITY_TYPE, __VISITS_TYPE))
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
base_schedule = rows.load.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.load.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
if base_schedule.metadata.begin != candidate_schedule.metadata.begin:
raise ValueError('Schedules begin at a different date: {0} vs {1}'
.format(base_schedule.metadata.begin, candidate_schedule.metadata.begin))
if base_schedule.metadata.end != candidate_schedule.metadata.end:
raise ValueError('Schedules end at a different date: {0} vs {1}'
.format(base_schedule.metadata.end, candidate_schedule.metadata.end))
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
date = base_schedule.metadata.begin
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
with rows.plot.create_routing_session() as routing_session:
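# NOTE: calculate_expected_visit_duration is not defined in this file; it is assumed to be
# provided elsewhere (other code paths here use rows.plot.DurationEstimator.create_expected_visit_duration)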
observed_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
base_schedule_frame = rows.plot.get_schedule_data_frame(base_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
candidate_schedule_frame = rows.plot.get_schedule_data_frame(candidate_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, axis = matplotlib.pyplot.subplots()
matplotlib.pyplot.tight_layout()
try:
contrast_frame = pandas.DataFrame.merge(base_schedule_frame,
candidate_schedule_frame,
on='Carer',
how='left',
suffixes=['_Base', '_Candidate'])
contrast_frame['Visits_Candidate'] = contrast_frame['Visits_Candidate'].fillna(0)
contrast_frame['Availability_Candidate'] \
= contrast_frame['Availability_Candidate'].mask(pandas.isnull, contrast_frame['Availability_Base'])
contrast_frame['Travel_Candidate'] \
= contrast_frame['Travel_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame['Service_Candidate'] \
= contrast_frame['Service_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame = contrast_frame.sort_values(
by=['Availability_Candidate', 'Service_Candidate', 'Travel_Candidate'],
ascending=False)
if plot_type == __VISITS_TYPE:
indices = numpy.arange(len(contrast_frame.index))
base_handle = axis.bar(indices, contrast_frame['Visits_Base'], __WIDTH)
candidate_handle = axis.bar(indices + __WIDTH, contrast_frame['Visits_Candidate'], __WIDTH)
axis.legend((base_handle, candidate_handle),
('Human Planners', 'Constraint Programming'), loc='best')
output_file = problem_file_name + '_contrast_visits_' + date.isoformat() + '.' + __FORMAT
elif plot_type == __ACTIVITY_TYPE:
indices = numpy.arange(len(base_schedule_frame.index))
def plot_activity_stacked_histogram(availability, travel, service, axis, width=0.35, initial_width=0.0,
color_offset=0):
time_delta_converter = rows.plot.TimeDeltaConverter()
travel_series = numpy.array(time_delta_converter(travel))
service_series = numpy.array(time_delta_converter(service))
idle_overtime_series = list(availability - travel - service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(),
idle_overtime_series)))
service_handle = axis.bar(indices + initial_width, service_series,
width,
bottom=time_delta_converter.zero,
color=color_map.colors[0 + color_offset])
travel_handle = axis.bar(indices + initial_width,
travel_series,
width,
bottom=service_series + time_delta_converter.zero_num,
color=color_map.colors[2 + color_offset])
idle_handle = axis.bar(indices + initial_width,
idle_series,
width,
bottom=service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[4 + color_offset])
overtime_handle = axis.bar(indices + initial_width,
overtime_series,
width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[6 + color_offset])
return service_handle, travel_handle, idle_handle, overtime_handle
travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Candidate,
contrast_frame.Travel_Candidate,
contrast_frame.Service_Candidate,
axis,
__WIDTH)
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Base,
contrast_frame.Travel_Base,
contrast_frame.Service_Base,
axis,
__WIDTH,
__WIDTH,
1)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend(
(travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle,
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle),
('', '', '', '', 'Service', 'Travel', 'Idle', 'Overtime'), loc='best', ncol=2, columnspacing=0)
output_file = problem_file_name + '_contrast_activity_' + date.isoformat() + '.' + __FORMAT
bottom, top = axis.get_ylim()
axis.set_ylim(bottom, top + 0.025)
else:
raise ValueError('Unknown plot type {0}'.format(plot_type))
matplotlib.pyplot.subplots_adjust(left=0.125)
matplotlib.pyplot.savefig(output_file, format=__FORMAT, dpi=300)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def parse_time_delta(text):
if text:
time = datetime.datetime.strptime(text, '%H:%M:%S').time()
return datetime.timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)
return None
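# Usage sketch:
# >>> parse_time_delta('01:30:15')
# datetime.timedelta(seconds=5415)
# >>> parse_time_delta('') is None
# True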
class TraceLog:
__STAGE_PATTERN = re.compile(r'^\w+(?P<number>\d+)(?:-Patch)?$')
__PENALTY_PATTERN = re.compile(r'^MissedVisitPenalty:\s+(?P<penalty>\d+)$')
__CARER_USED_PATTERN = re.compile(r'^CarerUsedPenalty:\s+(?P<penalty>\d+)$')
class ProgressMessage:
def __init__(self, **kwargs):
self.__branches = kwargs.get('branches', None)
self.__cost = kwargs.get('cost', None)
self.__dropped_visits = kwargs.get('dropped_visits', None)
self.__memory_usage = kwargs.get('memory_usage', None)
self.__solutions = kwargs.get('solutions', None)
self.__wall_time = parse_time_delta(kwargs.get('wall_time', None))
@property
def cost(self):
return self.__cost
@property
def solutions(self):
return self.__solutions
@property
def dropped_visits(self):
return self.__dropped_visits
class ProblemMessage:
def __init__(self, **kwargs):
self.__carers = kwargs.get('carers', None)
self.__visits = kwargs.get('visits', None)
self.__date = kwargs.get('date', None)
if self.__date:
self.__date = datetime.datetime.strptime(self.__date, '%Y-%b-%d').date()
self.__visit_time_windows = parse_time_delta(kwargs.get('visit_time_windows', None))
self.__break_time_windows = parse_time_delta(kwargs.get('break_time_windows', None))
self.__shift_adjustment = parse_time_delta(kwargs.get('shift_adjustment', None))
self.__area = kwargs.get('area', None)
self.__missed_visit_penalty = kwargs.get('missed_visit_penalty', None)
self.__carer_used_penalty = kwargs.get('carer_used_penalty', None)
@property
def date(self):
return self.__date
@property
def carers(self):
return self.__carers
@property
def visits(self):
return self.__visits
@property
def visit_time_window(self):
return self.__visit_time_windows
@property
def carer_used_penalty(self):
return self.__carer_used_penalty
@carer_used_penalty.setter
def carer_used_penalty(self, value):
self.__carer_used_penalty = value
@property
def missed_visit_penalty(self):
return self.__missed_visit_penalty
@missed_visit_penalty.setter
def missed_visit_penalty(self, value):
self.__missed_visit_penalty = value
@property
def shift_adjustment(self):
return self.__shift_adjustment
StageSummary = collections.namedtuple('StageSummary', ['duration', 'final_cost', 'final_dropped_visits'])
def __init__(self, time_point):
self.__start = time_point
self.__events = []
self.__current_stage = None
self.__current_strategy = None
self.__problem = TraceLog.ProblemMessage()
@staticmethod
def __parse_stage_number(body):
comment = body.get('comment', None)
if comment:
match = TraceLog.__STAGE_PATTERN.match(comment)
if match:
return int(match.group('number'))
return None
def append(self, time_point, body):
if 'branches' in body:
body_to_use = TraceLog.ProgressMessage(**body)
elif 'type' in body:
if body['type'] == 'started':
self.__current_stage = self.__parse_stage_number(body)
elif body['type'] == 'finished':
self.__current_stage = None
self.__current_strategy = None
elif body['type'] == 'unknown':
if 'comment' in body:
if 'MissedVisitPenalty' in body['comment']:
match = re.match(self.__PENALTY_PATTERN, body['comment'])
assert match is not None
missed_visit_penalty = int(match.group('penalty'))
self.__problem.missed_visit_penalty = missed_visit_penalty
elif 'CarerUsedPenalty' in body['comment']:
match = re.match(self.__CARER_USED_PATTERN, body['comment'])
assert match is not None
carer_used_penalty = int(match.group('penalty'))
self.__problem.carer_used_penalty = carer_used_penalty
body_to_use = body
elif 'area' in body:
body_to_use = TraceLog.ProblemMessage(**body)
if body_to_use.missed_visit_penalty is None and self.__problem.missed_visit_penalty is not None:
body_to_use.missed_visit_penalty = self.__problem.missed_visit_penalty
if body_to_use.carer_used_penalty is None and self.__problem.carer_used_penalty is not None:
body_to_use.carer_used_penalty = self.__problem.carer_used_penalty
self.__problem = body_to_use
else:
body_to_use = body
# quick fix to prevent negative computation time if the time frame crosses midnight
if self.__start < time_point:
computation_time = time_point - self.__start
else:
computation_time = time_point + datetime.timedelta(hours=24) - self.__start
self.__events.append([computation_time, self.__current_stage, self.__current_strategy, time_point, body_to_use])
def compute_stages(self) -> typing.List[StageSummary]:
groups = dict()
for delta, stage, topic, time, message in self.__events:
if isinstance(message, TraceLog.ProgressMessage):
if stage not in groups:
groups[stage] = []
groups[stage].append([delta, topic, message])
result = []
def create_stage_summary(group):
duration = group[-1][0] - group[0][0]
cost = group[-1][2].cost
dropped_visits = group[-1][2].dropped_visits
return TraceLog.StageSummary(duration=duration, final_cost=cost, final_dropped_visits=dropped_visits)
if len(groups) == 1:
result.append(create_stage_summary(groups[None]))
else:
for stage in range(1, max(filter(lambda s: s is not None, groups)) + 1):
result.append(create_stage_summary(groups[stage]))
return result
def has_stages(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if isinstance(event, TraceLog.ProblemMessage) or isinstance(event, TraceLog.ProgressMessage):
continue
if 'type' in event and event['type'] == 'started':
return True
return False
def best_cost(self, stage: int):
best_cost, _ = self.__best_cost_and_time(stage)
return best_cost
def best_cost_time(self, stage: int):
_, best_cost_time = self.__best_cost_and_time(stage)
return best_cost_time
def last_cost(self):
last_cost, _ = self.__last_cost_and_time()
return last_cost
def last_cost_time(self):
_, last_cost_time = self.__last_cost_and_time()
return last_cost_time
def computation_time(self):
computation_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__events:
computation_time = relative_time
return computation_time
def __best_cost_and_time(self, stage: int):
best_cost = float('inf')
best_time = datetime.timedelta.max
for relative_time, event_stage, strategy, absolute_time, event in self.__filtered_events():
if event_stage > stage:
continue
if best_cost > event.cost:
best_cost = event.cost
best_time = relative_time
return best_cost, best_time
def __last_cost_and_time(self):
last_cost = float('inf')
last_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__filtered_events():
last_cost = event.cost
last_time = relative_time
return last_cost, last_time
def __filtered_events(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if stage != 2 and stage != 3:
continue
if strategy == 'DELAY_RISKINESS_REDUCTION':
continue
if not isinstance(event, TraceLog.ProgressMessage):
continue
yield relative_time, stage, strategy, absolute_time, event
@property
def strategy(self):
return self.__current_strategy
@strategy.setter
def strategy(self, value):
self.__current_strategy = value
@property
def visits(self):
return self.__problem.visits
@property
def carers(self):
return self.__problem.carers
@property
def date(self):
return self.__problem.date
@property
def visit_time_window(self):
return self.__problem.visit_time_window
@property
def carer_used_penalty(self):
return self.__problem.carer_used_penalty
@property
def missed_visit_penalty(self):
return self.__problem.missed_visit_penalty
@property
def shift_adjustment(self):
return self.__problem.shift_adjustment
@property
def events(self):
return self.__events
def read_traces(trace_file) -> typing.List[TraceLog]:
log_line_pattern = re.compile(r'^\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?]\s+(?P<body>.*)$')
other_line_pattern = re.compile(r'^.*?\[\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?\]\s+(?P<body>.*)$')
strategy_line_pattern = re.compile(r'^Solving the (?P<stage_name>\w+) stage using (?P<strategy_name>\w+) strategy$')
loaded_visits_pattern = re.compile(r'^Loaded past visits in \d+ seconds$')
trace_logs = []
has_preambule = False
with open(trace_file, 'r') as input_stream:
current_log = None
for line in input_stream:
match = log_line_pattern.match(line)
if not match:
match = other_line_pattern.match(line)
if match:
raw_time = match.group('time')
time = datetime.datetime.strptime(raw_time, '%H:%M:%S.%f')
try:
raw_body = match.group('body')
body = json.loads(raw_body)
if 'comment' in body and (body['comment'] == 'All'
or 'MissedVisitPenalty' in body['comment']
or 'CarerUsedPenalty' in body['comment']):
if body['comment'] == 'All':
if 'type' in body:
if body['type'] == 'finished':
has_preambule = False
current_log.strategy = None
elif body['type'] == 'started':
has_preambule = True
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
elif 'area' in body and not has_preambule:
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
except json.decoder.JSONDecodeError:
strategy_match = strategy_line_pattern.match(match.group('body'))
if strategy_match:
current_log.strategy = strategy_match.group('strategy_name')
continue
loaded_visits_match = loaded_visits_pattern.match(match.group('body'))
if loaded_visits_match:
continue
warnings.warn('Failed to parse line: ' + line)
elif 'GUIDED_LOCAL_SEARCH specified without sane timeout: solve may run forever.' in line:
continue
else:
warnings.warn('Failed to match line: ' + line)
return trace_logs
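# read_traces yields one TraceLog per solver run found in the file; a new log is opened either by a
# preamble message of type 'started' or, when no preamble was seen, by the first message carrying
# an 'area' field.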
def traces_to_data_frame(trace_logs):
columns = ['relative_time', 'cost', 'dropped_visits', 'solutions', 'stage', 'stage_started', 'date', 'carers',
'visits']
has_stages = [trace.has_stages() for trace in trace_logs]
if all(has_stages) != any(has_stages):
raise ValueError('Some traces have stages while others do not')
has_stages = all(has_stages)
data = []
if has_stages:
for trace in trace_logs:
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
if not current_stage_name:
continue
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
current_stage_name, current_stage_started,
trace.date, current_carers, current_visits])
elif 'type' in event:
if 'comment' in event and event['type'] == 'unknown':
continue
if event['type'] == 'finished':
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
continue
if event['type'] == 'started':
current_stage_started = rel_time
current_stage_name = event['comment']
else:
for trace in trace_logs:
current_carers = None
current_visits = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
None, None,
trace.date, current_carers, current_visits])
return pandas.DataFrame(data=data, columns=columns)
def parse_pandas_duration(value):
raw_hours, raw_minutes, raw_seconds = value.split(':')
return datetime.timedelta(hours=int(raw_hours), minutes=int(raw_minutes), seconds=int(raw_seconds))
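# Example (illustrative): parse_pandas_duration('01:30:15') returns
# datetime.timedelta(hours=1, minutes=30, seconds=15); fractional seconds are not handled.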
class DateTimeFormatter:
def __init__(self, format):
self.__format = format
def __call__(self, x, pos=None):
if x < 0:
return None
x_to_use = x
if isinstance(x, numpy.int64):
x_to_use = x.item()
delta = datetime.timedelta(seconds=x_to_use)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime(self.__format)
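# DateTimeFormatter is intended to be wrapped in matplotlib.ticker.FuncFormatter, as AxisSettings
# does below, e.g. axis.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(DateTimeFormatter('%H:%M'))),
# so that tick positions expressed in seconds are rendered as clock times.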
class AxisSettings:
def __init__(self, minutes_per_step, format_pattern, units_label, right_xlimit, xticks):
self.__minutes_per_step = minutes_per_step
self.__format_pattern = format_pattern
self.__formatter = matplotlib.ticker.FuncFormatter(DateTimeFormatter(self.__format_pattern))
self.__units_label = units_label
self.__right_xlimit = right_xlimit
self.__xticks = xticks
@property
def formatter(self):
return self.__formatter
@property
def units_label(self):
return self.__units_label
@property
def right_xlimit(self):
return self.__right_xlimit
@property
def xticks(self):
return self.__xticks
@staticmethod
def infer(max_relative_time):
if datetime.timedelta(minutes=30) < max_relative_time < datetime.timedelta(hours=1):
minutes_step = 10
format = '%H:%M'
units = '[hh:mm]'
elif datetime.timedelta(hours=1) <= max_relative_time:
minutes_step = 60
format = '%H:%M'
units = '[hh:mm]'
else:
assert max_relative_time <= datetime.timedelta(minutes=30)
minutes_step = 5
format = '%M:%S'
units = '[mm:ss]'
right_xlimit = (max_relative_time + datetime.timedelta(minutes=1)).total_seconds() // 60 * 60
xticks = numpy.arange(0, max_relative_time.total_seconds() + minutes_step * 60, minutes_step * 60)
return AxisSettings(minutes_step, format, units, right_xlimit, xticks)
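    # For example, a 45-minute trace gets a 10-minute tick step with '%H:%M' labels, while a
    # 20-minute trace falls into the final branch and uses a 5-minute step with '%M:%S' labels.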
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
return '{0:02d}:{1:02d}'.format(hours, minutes)
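# Example (illustrative): a tick position equal to pandas.Timedelta(hours=2, minutes=5).value
# (i.e. nanoseconds) is rendered by format_timedelta_pandas as '02:05'.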
def format_time(x, pos=None):
if isinstance(x, numpy.int64):
x = x.item()
delta = datetime.timedelta(seconds=x)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime('%H:%M')
__SCATTER_POINT_SIZE = 1
__Y_AXIS_EXTENSION = 1.2
def add_trace_legend(axis, handles, bbox_to_anchor=(0.5, -0.23), ncol=3):
first_row = handles[0]
def legend_single_stage(row):
handle, multi_visits, visits, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02} {3} {4}'.format(multi_visits,
visits,
carers,
cost_function,
date_time.strftime('%d-%m'))
def legend_multi_stage(row):
handle, multi_visits, visits, multi_carers, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02}/{3:02} {4} {5}' \
.format(multi_visits, visits, multi_carers, carers, cost_function, date_time.strftime('%d-%m'))
if len(first_row) == 6:
legend_formatter = legend_single_stage
elif len(first_row) == 7:
legend_formatter = legend_multi_stage
else:
raise ValueError('Expecting row of either 6 or 7 elements')
return rows.plot.add_legend(axis,
list(map(operator.itemgetter(0), handles)),
list(map(legend_formatter, handles)),
ncol,
bbox_to_anchor)
def scatter_cost(axis, data_frame, color):
return axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']], data_frame['cost'],
s=__SCATTER_POINT_SIZE,
c=color)
def scatter_dropped_visits(axis, data_frame, color):
axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']],
data_frame['dropped_visits'],
s=__SCATTER_POINT_SIZE,
c=color)
def draw_avline(axis, point, color='lightgrey', linestyle='--'):
axis.axvline(point, color=color, linestyle=linestyle, linewidth=0.8, alpha=0.8)
def get_problem_stats(problem, date):
problem_visits = [visit for carer_visits in problem.visits
for visit in carer_visits.visits if visit.date == date]
return len(problem_visits), len([visit for visit in problem_visits if visit.carer_count > 1])
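# get_problem_stats returns a pair: the number of visits requested on the given date and, out of
# those, how many require more than one carer.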
def compare_trace(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
trace_file = get_or_raise(args, __FILE_ARG)
trace_file_base_name = os.path.basename(trace_file)
trace_file_stem, trace_file_ext = os.path.splitext(trace_file_base_name)
output_file_stem = getattr(args, __OUTPUT, trace_file_stem)
trace_logs = read_traces(trace_file)
data_frame = traces_to_data_frame(trace_logs)
current_date = getattr(args, __DATE_ARG, None)
dates = data_frame['date'].unique()
if current_date and current_date not in dates:
raise ValueError('Date {0} is not present in the data set'.format(current_date))
color_numbers = [0, 2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 11, 13]
color_number_it = iter(color_numbers)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
max_relative_time = datetime.timedelta()
try:
if current_date:
current_color = color_map.colors[next(color_number_it)]
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
ax_settings = AxisSettings.infer(max_relative_time)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
handles = []
for stage in stages:
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
current_stage_data_frame = current_date_frame[current_date_frame['stage'] == stage]
draw_avline(ax1, time_delta.total_seconds())
draw_avline(ax2, time_delta.total_seconds())
total_stage_visits = current_stage_data_frame['visits'].iloc[0]
carers = current_stage_data_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_stage_data_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_stage_visits,
carers,
cost_function,
current_date])
ax2.set_xlim(left=0)
ax2.set_ylim(bottom=-10)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = ax1.scatter(
[time_delta.total_seconds() for time_delta in current_date_frame['relative_time']],
current_date_frame['cost'], s=1)
                add_trace_legend(ax1, [[handle, total_multiple_carer_visits, total_problem_visits, carers, cost_function, current_date]])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
else:
handles = []
for current_date in dates:
current_color = color_map.colors[next(color_number_it)]
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
stage_linestyles = [None, 'dotted', 'dashed']
for stage, linestyle in zip(stages, stage_linestyles):
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
total_carers = current_date_frame['carers'].max()
multi_carers = current_date_frame['carers'].min()
if multi_carers == total_carers:
multi_carers = 0
total_visits = current_date_frame['visits'].max()
multi_visits = current_date_frame['visits'].min()
if multi_visits == total_visits:
multi_visits = 0
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_date_frame, current_color)
handles.append([handle,
multi_visits,
total_visits,
multi_carers,
total_carers,
cost_function,
current_date])
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_problem_visits,
carers,
cost_function,
current_date])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax_settings = AxisSettings.infer(max_relative_time)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
# if add_arrows:
# ax1.arrow(950, 200000, 40, -110000, head_width=10, head_length=20000, fc='k', ec='k')
# ax2.arrow(950, 60, 40, -40, head_width=10, head_length=10, fc='k', ec='k')
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_xlim(left=0, right=ax_settings.right_xlimit)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.set_xlim(left=0, right=ax_settings.right_xlimit)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def get_schedule_stats(data_frame):
def get_stage_stats(stage):
if stage and (isinstance(stage, str) or (isinstance(stage, float) and not numpy.isnan(stage))):
stage_frame = data_frame[data_frame['stage'] == stage]
else:
stage_frame = data_frame[data_frame['stage'].isnull()]
min_carers, max_carers = stage_frame['carers'].min(), stage_frame['carers'].max()
if min_carers != max_carers:
            raise ValueError(
                'Number of carers differs within stage in range [{0}, {1}]'.format(min_carers, max_carers))
        min_visits, max_visits = stage_frame['visits'].min(), stage_frame['visits'].max()
        if min_visits != max_visits:
            raise ValueError(
                'Number of visits differs within stage in range [{0}, {1}]'.format(min_visits, max_visits))
return min_carers, min_visits
stages = data_frame['stage'].unique()
if len(stages) > 1:
data = []
for stage in stages:
carers, visits = get_stage_stats(stage)
data.append([stage, carers, visits])
return data
else:
stage_to_use = None
if len(stages) == 1:
stage_to_use = stages[0]
carers, visits = get_stage_stats(stage_to_use)
return [[None, carers, visits]]
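# get_schedule_stats therefore yields one [stage, carers, visits] row per stage, or a single row
# with stage None for traces produced by a single-stage run.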
def contrast_trace(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
output_file_stem = getattr(args, __OUTPUT, problem_file_name + '_contrast_traces')
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
candidate_trace_file = get_or_raise(args, __CANDIDATE_FILE_ARG)
base_frame = traces_to_data_frame(read_traces(base_trace_file))
candidate_frame = traces_to_data_frame(read_traces(candidate_trace_file))
current_date = get_or_raise(args, __DATE_ARG)
if current_date not in base_frame['date'].unique():
raise ValueError('Date {0} is not present in the base data set'.format(current_date))
if current_date not in candidate_frame['date'].unique():
raise ValueError('Date {0} is not present in the candidate data set'.format(current_date))
max_relative_time = datetime.timedelta()
max_relative_time = max(base_frame[base_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
max_relative_time = max(candidate_frame[candidate_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
    max_relative_time = datetime.timedelta(minutes=20)  # cap the plotted horizon at 20 minutes regardless of the trace length
ax_settings = AxisSettings.infer(max_relative_time)
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
def plot(data_frame, color):
stages = data_frame['stage'].unique()
if len(stages) > 1:
for stage, linestyle in zip(stages, [None, 'dotted', 'dashed']):
time_delta = data_frame[data_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), linestyle=linestyle)
scatter_dropped_visits(ax2, data_frame, color=color)
return scatter_cost(ax1, data_frame, color=color)
base_current_data_frame = base_frame[base_frame['date'] == current_date]
base_handle = plot(base_current_data_frame, color_map.colors[0])
base_stats = get_schedule_stats(base_current_data_frame)
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
candidate_handle = plot(candidate_current_data_frame, color_map.colors[1])
candidate_stats = get_schedule_stats(candidate_current_data_frame)
labels = []
for stages in [base_stats, candidate_stats]:
if len(stages) == 1:
labels.append('Direct')
elif len(stages) > 1:
labels.append('Multistage')
else:
raise ValueError()
ax1.set_ylim(bottom=0.0)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0.0, right=max_relative_time.total_seconds())
legend1 = ax1.legend([base_handle, candidate_handle], labels)
for handle in legend1.legendHandles:
handle._sizes = [25]
ax2.set_xlim(left=0.0, right=max_relative_time.total_seconds())
ax2.set_ylim(bottom=0.0)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax1.set_xticks(ax_settings.xticks)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
legend2 = ax2.legend([base_handle, candidate_handle], labels)
for handle in legend2.legendHandles:
handle._sizes = [25]
figure.tight_layout()
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
scatter_dropped_visits(ax2, candidate_current_data_frame, color=color_map.colors[1])
scatter_cost(ax1, candidate_current_data_frame, color=color_map.colors[1])
stage2_started = \
candidate_current_data_frame[candidate_current_data_frame['stage'] == 'Stage2']['stage_started'].iloc[0]
ax1.set_ylim(bottom=0, top=6 * 10 ** 4)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0, right=12)
ax2.set_xlim(left=0, right=12)
x_ticks_positions = range(0, 12 + 1, 2)
# matplotlib.pyplot.locator_params(axis='x', nbins=6)
ax2.set_ylim(bottom=-10.0, top=120)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(x_ticks_positions)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
# rows.plot.save_figure(output_file_stem + '_first_stage_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compare_box_plots(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
output_file_stem = getattr(args, __OUTPUT, problem_file_name)
traces = read_traces(base_trace_file)
figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(1, 3)
stages = [trace.compute_stages() for trace in traces]
num_stages = max(len(s) for s in stages)
durations = [[getattr(local_stage[num_stage], 'duration').total_seconds() for local_stage in stages] for num_stage in range(num_stages)]
max_duration = max(max(stage_durations) for stage_durations in durations)
axis_settings = AxisSettings.infer(datetime.timedelta(seconds=max_duration))
try:
ax1.boxplot(durations, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax1.set_yticks(axis_settings.xticks)
ax1.yaxis.set_major_formatter(axis_settings.formatter)
ax1.set_xlabel('Stage')
ax1.set_ylabel('Duration [hh:mm]')
costs = [[getattr(local_stage[num_stage], 'final_cost') for local_stage in stages] for num_stage in range(num_stages)]
ax2.boxplot(costs, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
formatter = matplotlib.ticker.ScalarFormatter()
formatter.set_scientific(True)
formatter.set_powerlimits((-3, 3))
ax2.yaxis.set_major_formatter(formatter)
ax2.set_xlabel('Stage')
ax2.set_ylabel('Cost')
declined_visits = [[getattr(local_stage[num_stage], 'final_dropped_visits') for local_stage in stages] for num_stage in range(num_stages)]
ax3.boxplot(declined_visits, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
        max_declined_visits = max(max(stage_visits) for stage_visits in declined_visits)
ax3.set_xlabel('Stage')
ax3.set_ylabel('Declined Visits')
        if max_declined_visits < 100:
            dropped_visit_ticks = range(0, max_declined_visits + 1)
        else:
            dropped_visit_ticks = range(0, max_declined_visits + 100, 100)
ax3.set_yticks(dropped_visit_ticks)
figure.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compare_prediction_error(args, settings):
base_schedule = rows.plot.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.plot.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
observed_duration_by_visit = rows.plot.calculate_observed_visit_duration(base_schedule)
expected_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
data = []
for visit in base_schedule.visits:
observed_duration = observed_duration_by_visit[visit.visit]
expected_duration = expected_duration_by_visit[visit.visit]
data.append([visit.key, observed_duration.total_seconds(), expected_duration.total_seconds()])
frame = pandas.DataFrame(columns=['Visit', 'ObservedDuration', 'ExpectedDuration'], data=data)
frame['Error'] = (frame.ObservedDuration - frame.ExpectedDuration) / frame.ObservedDuration
figure, axis = matplotlib.pyplot.subplots()
try:
        axis.plot(frame['Error'], label='(Observed - Expected)/Observed')
axis.legend()
axis.set_ylim(-20, 2)
axis.grid()
matplotlib.pyplot.show()
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def remove_violated_visits(rough_schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
duration_estimator: rows.plot.DurationEstimator,
distance_estimator: rows.plot.DistanceEstimator) -> rows.model.schedule.Schedule:
max_delay = metadata.visit_time_window
min_delay = -metadata.visit_time_window
dropped_visits = 0
allowed_visits = []
for route in rough_schedule.routes:
carer_diary = problem.get_diary(route.carer, metadata.date)
if not carer_diary:
continue
for visit in route.visits:
if visit.check_in is not None:
check_in_delay = visit.check_in - datetime.datetime.combine(metadata.date, visit.time)
if check_in_delay > max_delay: # or check_in_delay < min_delay:
dropped_visits += 1
continue
allowed_visits.append(visit)
# schedule does not have visits which exceed time windows
first_improved_schedule = rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
allowed_visits = []
for route in first_improved_schedule.routes:
if not route.visits:
continue
diary = problem.get_diary(route.carer, metadata.date)
assert diary is not None
# shift adjustment is added twice because it is allowed to extend the time before and after the working hours
max_shift_end = max(event.end for event in diary.events) + metadata.shift_adjustment + metadata.shift_adjustment
first_visit = route.visits[0]
current_time = datetime.datetime.combine(metadata.date, first_visit.time)
if current_time <= max_shift_end:
allowed_visits.append(first_visit)
visits_made = []
total_slack = datetime.timedelta()
if len(route.visits) == 1:
visit = route.visits[0]
visit_duration = duration_estimator(visit.visit)
if visit_duration is None:
visit_duration = visit.duration
current_time += visit_duration
if current_time <= max_shift_end:
visits_made.append(visit)
else:
dropped_visits += 1
else:
for prev_visit, next_visit in route.edges():
visit_duration = duration_estimator(prev_visit.visit)
if visit_duration is None:
visit_duration = prev_visit.duration
current_time += visit_duration
current_time += distance_estimator(prev_visit, next_visit)
start_time = max(current_time, datetime.datetime.combine(metadata.date, next_visit.time) - max_delay)
total_slack += start_time - current_time
current_time = start_time
if current_time <= max_shift_end:
visits_made.append(next_visit)
else:
dropped_visits += 1
if current_time <= max_shift_end:
total_slack += max_shift_end - current_time
total_break_duration = datetime.timedelta()
for carer_break in diary.breaks:
total_break_duration += carer_break.duration
if total_slack + datetime.timedelta(hours=2) < total_break_duration:
# route is not respecting contractual breaks
visits_made.pop()
for visit in visits_made:
allowed_visits.append(visit)
# schedule does not contain visits which exceed overtime of the carer
return rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
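# The returned schedule keeps only visits that start within the allowed delay of their planned time
# and fit within the carer's shift extended by the shift adjustment; everything else counts as dropped.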
class ScheduleCost:
CARER_COST = datetime.timedelta(seconds=60 * 60 * 4)
def __init__(self, travel_time: datetime.timedelta, carers_used: int, visits_missed: int, missed_visit_penalty: int):
self.__travel_time = travel_time
self.__carers_used = carers_used
self.__visits_missed = visits_missed
self.__missed_visit_penalty = missed_visit_penalty
@property
def travel_time(self) -> datetime.timedelta:
return self.__travel_time
@property
def visits_missed(self) -> int:
return self.__visits_missed
@property
def missed_visit_penalty(self) -> int:
return self.__missed_visit_penalty
@property
def carers_used(self) -> int:
return self.__carers_used
    def total_cost(self, include_vehicle_cost: bool) -> float:
cost = self.__travel_time.total_seconds() + self.__missed_visit_penalty * self.__visits_missed
if include_vehicle_cost:
cost += self.CARER_COST.total_seconds() * self.__carers_used
return cost
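    # Worked example (hypothetical figures): with 2 hours of travel time, 3 missed visits and a
    # missed-visit penalty of 1000, total_cost(False) = 7200 + 3 * 1000 = 10200 (seconds);
    # total_cost(True) additionally adds CARER_COST (4 hours) per carer used.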
def get_schedule_cost(schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
distance_estimator: rows.plot.DistanceEstimator) -> ScheduleCost:
carer_used_ids = set()
visit_made_ids = set()
travel_time = datetime.timedelta()
for route in schedule.routes:
if not route.visits:
continue
carer_used_ids.add(route.carer.sap_number)
for visit in route.visits:
visit_made_ids.add(visit.visit.key)
for source, destination in route.edges():
travel_time += distance_estimator(source, destination)
available_visit_ids = {visit.key for visit in problem.requested_visits(schedule.date)}
return ScheduleCost(travel_time, len(carer_used_ids), len(available_visit_ids.difference(visit_made_ids)), metadata.missed_visit_penalty)
def compare_schedule_cost(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig',
['ProblemPath', 'HumanSolutionPath', 'SolverSecondSolutionPath', 'SolverThirdSolutionPath'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/second_stage_c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
include_vehicle_cost = False
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
def normalize_cost(value) -> float:
if isinstance(value, datetime.timedelta):
value_to_use = value.total_seconds()
elif isinstance(value, float) or isinstance(value, int):
value_to_use = value
else:
return float('inf')
return round(value_to_use / 3600, 2)
        for solver_trace, problem_config in zip(solver_traces, problem_data):
            problem = rows.load.load_problem(os.path.join(simulation_dir, problem_config.ProblemPath))
            human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.HumanSolutionPath))
            solver_second_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverSecondSolutionPath))
            solver_third_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverThirdSolutionPath))
assert solver_second_schedule.date == human_schedule.date
assert solver_third_schedule.date == human_schedule.date
available_carers = problem.available_carers(human_schedule.date)
requested_visits = problem.requested_visits(human_schedule.date)
one_carer_visits = [visit for visit in requested_visits if visit.carer_count == 1]
two_carer_visits = [visit for visit in requested_visits if visit.carer_count == 2]
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_third_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_second_schedule_to_use = remove_violated_visits(solver_second_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
solver_third_schedule_to_use = remove_violated_visits(solver_third_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
human_cost = get_schedule_cost(human_schedule_to_use, solver_trace, problem, distance_estimator)
solver_second_cost = get_schedule_cost(solver_second_schedule_to_use, solver_trace, problem, distance_estimator)
solver_third_cost = get_schedule_cost(solver_third_schedule_to_use, solver_trace, problem, distance_estimator)
results.append(collections.OrderedDict(date=solver_trace.date,
day=solver_trace.date.day,
carers=len(available_carers),
one_carer_visits=len(one_carer_visits),
two_carer_visits=2 * len(two_carer_visits),
missed_visit_penalty=normalize_cost(solver_trace.missed_visit_penalty),
carer_used_penalty=normalize_cost(solver_trace.carer_used_penalty),
planner_missed_visits=human_cost.visits_missed,
solver_second_missed_visits=solver_second_cost.visits_missed,
solver_third_missed_visits=solver_third_cost.visits_missed,
planner_travel_time=normalize_cost(human_cost.travel_time),
solver_second_travel_time=normalize_cost(solver_second_cost.travel_time),
solver_third_travel_time=normalize_cost(solver_third_cost.travel_time),
planner_carers_used=human_cost.carers_used,
solver_second_carers_used=solver_second_cost.carers_used,
solver_third_carers_used=solver_third_cost.carers_used,
planner_total_cost=normalize_cost(human_cost.total_cost(include_vehicle_cost)),
solver_second_total_cost=normalize_cost(solver_second_cost.total_cost(include_vehicle_cost)),
solver_third_total_cost=normalize_cost(solver_third_cost.total_cost(include_vehicle_cost)),
solver_second_time=int(math.ceil(solver_trace.best_cost_time(2).total_seconds())),
solver_third_time=int(math.ceil(solver_trace.best_cost_time(3).total_seconds()))))
data_frame = pandas.DataFrame(data=results)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'carers', 'one_carer_visits', 'two_carer_visits', 'missed_visit_penalty',
'planner_total_cost', 'solver_second_total_cost', 'solver_third_total_cost',
'planner_missed_visits', 'solver_second_missed_visits', 'solver_third_missed_visits',
'planner_travel_time', 'solver_second_travel_time', 'solver_third_travel_time', 'solver_second_time',
'solver_third_time']],
tablefmt='latex', headers='keys', showindex=False))
def get_consecutive_visit_time_span(schedule: rows.model.schedule.Schedule, start_time_estimator):
client_visits = collections.defaultdict(list)
for visit in schedule.visits:
client_visits[visit.visit.service_user].append(visit)
for client in client_visits:
visits = client_visits[client]
used_keys = set()
unique_visits = []
for visit in visits:
date_time = start_time_estimator(visit)
if date_time.hour == 0 and date_time.minute == 0:
continue
if visit.visit.key not in used_keys:
used_keys.add(visit.visit.key)
unique_visits.append(visit)
unique_visits.sort(key=start_time_estimator)
client_visits[client] = unique_visits
client_span = collections.defaultdict(datetime.timedelta)
for client in client_visits:
if len(client_visits[client]) < 2:
continue
last_visit = client_visits[client][0]
total_span = datetime.timedelta()
for next_visit in client_visits[client][1:]:
total_span += start_time_estimator(next_visit) - start_time_estimator(last_visit)
last_visit = next_visit
client_span[client] = total_span
return client_span
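# Example (illustrative): a client with unique visits starting at 08:00, 12:00 and 18:00 gets a
# span of (12:00 - 08:00) + (18:00 - 12:00) = 10 hours; clients with fewer than two visits are skipped.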
def get_carer_client_frequency(schedule: rows.model.schedule.Schedule):
client_assigned_carers = collections.defaultdict(collections.Counter)
for visit in schedule.visits:
client_assigned_carers[int(visit.visit.service_user)][int(visit.carer.sap_number)] += 1
return client_assigned_carers
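# The result maps each service user to a Counter of carer SAP numbers, i.e. how many visits each
# carer delivered to that client over the day.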
def get_visits(problem: rows.model.problem.Problem, date: datetime.date):
visits = set()
for local_visits in problem.visits:
for visit in local_visits.visits:
if date != visit.date:
continue
visit.service_user = local_visits.service_user
visits.add(visit)
return visits
def get_teams(problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
multiple_carer_visit_keys = set()
for visit in get_visits(problem, schedule.date):
if visit.carer_count > 1:
multiple_carer_visit_keys.add(visit.key)
client_visit_carers = collections.defaultdict(lambda: collections.defaultdict(list))
for visit in schedule.visits:
if visit.visit.key not in multiple_carer_visit_keys:
continue
client_visit_carers[visit.visit.service_user][visit.visit.key].append(int(visit.carer.sap_number))
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
client_visit_carers[client][visit_key].sort()
teams = set()
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
teams.add(tuple(client_visit_carers[client][visit_key]))
return teams
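# Teams are reported as sorted tuples of carer SAP numbers that jointly served at least one
# multiple-carer visit, so each pairing is counted once regardless of how many visits it covered.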
def compare_schedule_quality(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'HumanSolutionPath', 'SolverSolutionPath'])
def compare_quality(solver_trace, problem, human_schedule, solver_schedule, duration_estimator, distance_estimator):
visits = get_visits(problem, solver_trace.date)
multiple_carer_visit_keys = {visit.key for visit in visits if visit.carer_count > 1}
clients = list({int(visit.service_user) for visit in visits})
# number of different carers assigned throughout the day
human_carer_frequency = get_carer_client_frequency(human_schedule)
solver_carer_frequency = get_carer_client_frequency(solver_schedule)
def median_carer_frequency(client_counters):
total_counters = []
for client in client_counters:
# total_counters += len(client_counters[client])
total_counters.append(len(client_counters[client]))
# return total_counters / len(client_counters)
return numpy.median(total_counters)
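        # Example (illustrative): if one client saw 2 distinct carers and another saw 4, the
        # reported median carer frequency is 3.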
human_schedule_squared = []
solver_schedule_squared = []
for client in clients:
if client in human_carer_frequency:
human_schedule_squared.append(sum(human_carer_frequency[client][carer] ** 2 for carer in human_carer_frequency[client]))
else:
human_schedule_squared.append(0)
if client in solver_carer_frequency:
solver_schedule_squared.append(sum(solver_carer_frequency[client][carer] ** 2 for carer in solver_carer_frequency[client]))
else:
solver_schedule_squared.append(0)
human_matching_dominates = 0
solver_matching_dominates = 0
for index in range(len(clients)):
if human_schedule_squared[index] > solver_schedule_squared[index]:
human_matching_dominates += 1
elif human_schedule_squared[index] < solver_schedule_squared[index]:
solver_matching_dominates += 1
matching_no_diff = len(clients) - human_matching_dominates - solver_matching_dominates
assert matching_no_diff >= 0
human_schedule_span = get_consecutive_visit_time_span(human_schedule, lambda visit: visit.check_in)
solver_schedule_span = get_consecutive_visit_time_span(solver_schedule, lambda visit: datetime.datetime.combine(visit.date, visit.time))
human_span_dominates = 0
solver_span_dominates = 0
for client in clients:
if human_schedule_span[client] > solver_schedule_span[client]:
human_span_dominates += 1
elif human_schedule_span[client] < solver_schedule_span[client]:
solver_span_dominates += 1
span_no_diff = len(clients) - human_span_dominates - solver_span_dominates
        assert span_no_diff >= 0
human_teams = get_teams(problem, human_schedule)
solver_teams = get_teams(problem, solver_schedule)
human_schedule_frame = rows.plot.get_schedule_data_frame(human_schedule, problem, duration_estimator, distance_estimator)
solver_schedule_frame = rows.plot.get_schedule_data_frame(solver_schedule, problem, duration_estimator, distance_estimator)
human_visits = human_schedule_frame['Visits'].median()
solver_visits = solver_schedule_frame['Visits'].median()
human_total_overtime = compute_overtime(human_schedule_frame).sum()
solver_total_overtime = compute_overtime(solver_schedule_frame).sum()
return {'problem': str(human_schedule.date),
'visits': len(visits),
'clients': len(clients),
'human_overtime': human_total_overtime,
'solver_overtime': solver_total_overtime,
'human_visits_median': human_visits,
'solver_visits_median': solver_visits,
'human_visit_span_dominates': human_span_dominates,
'solver_visit_span_dominates': solver_span_dominates,
'visit_span_indifferent': span_no_diff,
'human_matching_dominates': human_matching_dominates,
'solver_matching_dominates': solver_matching_dominates,
'human_carer_frequency': median_carer_frequency(human_carer_frequency),
'solver_carer_frequency': median_carer_frequency(solver_carer_frequency),
'matching_indifferent': matching_no_diff,
'human_teams': len(human_teams),
'solver_teams': len(solver_teams)}
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
        for solver_trace, problem_config in zip(solver_traces, problem_data):
            problem = rows.load.load_problem(os.path.join(simulation_dir, problem_config.ProblemPath))
            human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.HumanSolutionPath))
            solver_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverSolutionPath))
assert solver_trace.date == human_schedule.date
assert solver_trace.date == solver_schedule.date
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_schedule_to_use = remove_violated_visits(solver_schedule, solver_trace, problem, duration_estimator, distance_estimator)
row = compare_quality(solver_trace, problem, human_schedule_to_use, solver_schedule_to_use, duration_estimator, distance_estimator)
results.append(row)
data_frame = pandas.DataFrame(data=results)
data_frame['human_visit_span_dominates_rel'] = data_frame['human_visit_span_dominates'] / data_frame['clients']
data_frame['human_visit_span_dominates_rel_label'] = data_frame['human_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_visit_span_dominates_rel'] = data_frame['solver_visit_span_dominates'] / data_frame['clients']
data_frame['solver_visit_span_dominates_rel_label'] = data_frame['solver_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['visit_span_indifferent_rel'] = data_frame['visit_span_indifferent'] / data_frame['clients']
data_frame['human_matching_dominates_rel'] = data_frame['human_matching_dominates'] / data_frame['clients']
data_frame['human_matching_dominates_rel_label'] = data_frame['human_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_matching_dominates_rel'] = data_frame['solver_matching_dominates'] / data_frame['clients']
data_frame['solver_matching_dominates_rel_label'] = data_frame['solver_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['matching_indifferent_rel'] = data_frame['matching_indifferent'] / data_frame['clients']
data_frame['day'] = data_frame['problem'].apply(lambda label: datetime.datetime.strptime(label, '%Y-%m-%d').date().day)
data_frame['human_overtime_label'] = data_frame['human_overtime'].apply(get_time_delta_label)
data_frame['solver_overtime_label'] = data_frame['solver_overtime'].apply(get_time_delta_label)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'human_visits_median', 'solver_visits_median', 'human_overtime_label', 'solver_overtime_label',
'human_carer_frequency', 'solver_carer_frequency',
'human_matching_dominates_rel_label', 'solver_matching_dominates_rel_label',
'human_teams', 'solver_teams']], tablefmt='latex', showindex=False, headers='keys'))
BenchmarkData = collections.namedtuple('BenchmarkData', ['BestCost', 'BestCostTime', 'BestBound', 'ComputationTime'])
class MipTrace:
    __MIP_HEADER_PATTERN = re.compile(r'^\s*Expl\s+Unexpl\s+|\s+Obj\s+Depth\s+IntInf\s+|\s+Incumbent\s+BestBd\s+Gap\s+|\s+It/Node\s+Time\s*$')
    __MIP_LINE_PATTERN = re.compile(r'^(?P<solution_flag>[\w\*]?)\s*'
                                    r'(?P<explored_nodes>\d+)\s+'
                                    r'(?P<nodes_to_explore>\d+)\s+'
                                    r'(?P<node_relaxation>[\w\.]*)\s+'
                                    r'(?P<node_depth>\d*)\s+'
                                    r'(?P<fractional_variables>\w*)\s+'
                                    r'(?P<incumbent>[\d\.\-]*)\s+'
                                    r'(?P<lower_bound>[\d\.\-]*)\s+'
                                    r'(?P<gap>[\d\.\%\-]*)\s+'
                                    r'(?P<simplex_it_per_node>[\d\.\-]*)\s+'
                                    r'(?P<elapsed_time>\d+)s$')
    __SUMMARY_PATTERN = re.compile(r'^Best\sobjective\s(?P<objective>[e\d\.\+]+),\s'
                                   r'best\sbound\s(?P<bound>[e\d\.\+]+),\s'
                                   r'gap\s(?P<gap>[e\d\.\+]+)\%$')
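    # The summary pattern is written for Gurobi-style closing lines such as (illustrative):
    # 'Best objective 1.234500000000e+04, best bound 1.230000000000e+04, gap 0.3659%'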
class MipProgressMessage:
def __init__(self, has_solution, best_cost, lower_bound, elapsed_time):
self.__has_solution = has_solution
self.__best_cost = best_cost
self.__lower_bound = lower_bound
self.__elapsed_time = elapsed_time
@property
def has_solution(self):
return self.__has_solution
@property
def best_cost(self):
return self.__best_cost
@property
def lower_bound(self):
return self.__lower_bound
@property
def elapsed_time(self):
return self.__elapsed_time
def __init__(self, best_objective: float, best_bound: float, events: typing.List[MipProgressMessage]):
self.__best_objective = best_objective
self.__best_bound = best_bound
self.__events = events
@staticmethod
def read_from_file(path) -> 'MipTrace':
events = []
best_objective = float('inf')
best_bound = float('-inf')
with open(path, 'r') as fp:
lines = fp.readlines()
lines_it = iter(lines)
for line in lines_it:
if re.match(MipTrace.__MIP_HEADER_PATTERN, line):
break
next(lines_it, None) # read the empty line
for line in lines_it:
line_match = re.match(MipTrace.__MIP_LINE_PATTERN, line)
if not line_match:
break
raw_solution_flag = line_match.group('solution_flag')
raw_incumbent = line_match.group('incumbent')
raw_lower_bound = line_match.group('lower_bound')
raw_elapsed_time = line_match.group('elapsed_time')
has_solution = raw_solution_flag == 'H' or raw_solution_flag == '*'
incumbent = float(raw_incumbent) if raw_incumbent and raw_incumbent != '-' else float('inf')
lower_bound = float(raw_lower_bound) if raw_lower_bound else float('-inf')
elapsed_time = datetime.timedelta(seconds=int(raw_elapsed_time)) if raw_elapsed_time else datetime.timedelta()
events.append(MipTrace.MipProgressMessage(has_solution, incumbent, lower_bound, elapsed_time))
next(lines_it, None)
for line in lines_it:
line_match = re.match(MipTrace.__SUMMARY_PATTERN, line)
if line_match:
raw_objective = line_match.group('objective')
if raw_objective:
best_objective = float(raw_objective)
raw_bound = line_match.group('bound')
if raw_bound:
best_bound = float(raw_bound)
return MipTrace(best_objective, best_bound, events)
def best_cost(self):
return self.__best_objective
def best_cost_time(self):
for event in reversed(self.__events):
if event.has_solution:
return event.elapsed_time
return datetime.timedelta.max
def best_bound(self):
return self.__best_bound
def computation_time(self):
if self.__events:
return self.__events[-1].elapsed_time
return datetime.timedelta.max
class DummyTrace:
def __init__(self):
pass
def best_cost(self):
return float('inf')
def best_bound(self):
return 0
def best_cost_time(self):
return datetime.timedelta(hours=23, minutes=59, seconds=59)
def compare_benchmark_table(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'Carers', 'Visits', 'Visits2', 'MipSolutionLog',
'CpTeamSolutionLog',
'CpWindowsSolutionLog'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
old_simulation_dir = '/home/pmateusz/dev/cordia/simulations/review_simulations_old'
dummy_log = DummyTrace()
problem_configs = [ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m0c3.json'.format(day_number)),
3, 25, 0,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)]
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m5c3.json'.format(day_number)),
3, 20, 5,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m5c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_teams_v25m5c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_windows_v25m5c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m0c5.json'.format(day_number)),
5, 50, 0,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m10c5.json'.format(day_number)),
5, 40, 10,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m10c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_teams_v50m10c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_windows_v50m10c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
logs = []
for problem_config in problem_configs:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
            if os.path.exists(problem_config.CpTeamSolutionLog):
                cp_team_logs = read_traces(problem_config.CpTeamSolutionLog)
                if not cp_team_logs:
                    warnings.warn('File {0} is empty'.format(problem_config.CpTeamSolutionLog))
                    cp_team_log = dummy_log
                else:
                    cp_team_log = cp_team_logs[0]
            else:
                cp_team_log = dummy_log
            if os.path.exists(problem_config.CpWindowsSolutionLog):
                cp_window_logs = read_traces(problem_config.CpWindowsSolutionLog)
                if not cp_window_logs:
                    warnings.warn('File {0} is empty'.format(problem_config.CpWindowsSolutionLog))
                    cp_window_log = dummy_log
                else:
                    cp_window_log = cp_window_logs[0]
            else:
                cp_window_log = dummy_log
if os.path.exists(problem_config.MipSolutionLog):
mip_log = MipTrace.read_from_file(problem_config.MipSolutionLog)
if not mip_log:
warnings.warn('File {0} is empty'.format(problem_config.MipSolutionLog))
mip_log = dummy_log
else:
mip_log = dummy_log
logs.append([problem_config, mip_log, cp_team_log, cp_window_log])
def get_gap(cost: float, lower_bound: float) -> float:
if lower_bound == 0.0:
return float('inf')
return (cost - lower_bound) * 100.0 / lower_bound
def get_delta(cost, cost_to_compare):
return (cost - cost_to_compare) * 100.0 / cost_to_compare
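    # Examples: get_gap(110.0, 100.0) == 10.0 per cent above the lower bound (a zero bound maps to
    # infinity to avoid division by zero), and get_delta(110.0, 100.0) == 10.0 per cent above the
    # best known cost.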
def get_computation_time_label(time: datetime.timedelta) -> str:
return str(time.total_seconds())
data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
data.append(collections.OrderedDict(
date=cp_team_log.date,
visits=problem_config.Visits,
visits_of_two=problem_config.Visits2,
carers=cp_team_log.carers,
penalty=cp_team_log.missed_visit_penalty,
lower_bound=mip_log.best_bound(),
mip_best_cost=mip_log.best_cost(),
mip_best_gap=get_gap(mip_log.best_cost(), mip_log.best_bound()),
mip_best_time=get_computation_time_label(mip_log.best_cost_time()),
team_best_cost=cp_team_log.best_cost(),
team_best_gap=get_gap(cp_team_log.best_cost(), mip_log.best_bound()),
team_best_delta=get_gap(cp_team_log.best_cost(), mip_log.best_cost()),
team_best_time=get_computation_time_label(cp_team_log.best_cost_time()),
windows_best_cost=cp_window_log.best_cost(),
windows_best_gap=get_gap(cp_window_log.best_cost(), mip_log.best_bound()),
windows_best_delta=get_gap(cp_window_log.best_cost(), mip_log.best_cost()),
windows_best_time=get_computation_time_label(cp_window_log.best_cost_time())))
data_frame = pandas.DataFrame(data=data)
def get_duration_label(time_delta: datetime.timedelta) -> str:
assert time_delta.days == 0
hours = int(time_delta.total_seconds() / 3600)
minutes = int(time_delta.total_seconds() / 60 - hours * 60)
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
# return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
return '{0:,.0f}'.format(time_delta.total_seconds())
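    # Example (illustrative): a stage lasting 1 hour 30 minutes is rendered as '5,400', i.e. the
    # total number of seconds with a thousands separator; the hh:mm:ss variant is kept commented out above.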
def get_cost_label(cost: float) -> str:
return '{0:,.0f}'.format(cost)
def get_gap_label(gap: float) -> str:
return '{0:,.2f}'.format(gap)
def get_problem_label(problem, date: datetime.date):
label = '{0:2d} {1}'.format(date.day, problem.Visits)
if problem.Visits2 == 0:
return label
return label + '/' + str(problem.Visits2)
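    # Example (illustrative): an instance solved on the 14th with 40 single-carer and 10 double-carer
    # visits is labelled '14 40/10'; instances without double-carer visits drop the '/...' suffix.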
print_data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
best_cost = min([mip_log.best_cost(), cp_team_log.best_cost(), cp_window_log.best_cost()])
print_data.append(collections.OrderedDict(Problem=get_problem_label(problem_config, cp_team_log.date),
Penalty=get_cost_label(cp_team_log.missed_visit_penalty),
LB=get_cost_label(mip_log.best_bound()),
MIP_COST=get_cost_label(mip_log.best_cost()),
MIP_GAP=get_gap_label(get_gap(mip_log.best_cost(), mip_log.best_bound())),
MIP_DELTA=get_gap_label(get_delta(mip_log.best_cost(), best_cost)),
MIP_TIME=get_duration_label(mip_log.best_cost_time()),
TEAMS_GAP=get_gap_label(get_gap(cp_team_log.best_cost(), mip_log.best_bound())),
TEAMS_DELTA=get_gap_label(get_delta(cp_team_log.best_cost(), best_cost)),
TEAMS_COST=get_cost_label(cp_team_log.best_cost()),
TEAMS_Time=get_duration_label(cp_team_log.best_cost_time()),
WINDOWS_COST=get_cost_label(cp_window_log.best_cost()),
WINDOWS_GAP=get_gap_label(get_gap(cp_window_log.best_cost(), mip_log.best_bound())),
WINDOWS_DELTA=get_gap_label(get_delta(cp_window_log.best_cost(), best_cost)),
WINDOWS_TIME=get_duration_label(cp_window_log.best_cost_time())
))
data_frame = pandas.DataFrame(data=print_data)
print(tabulate.tabulate(
data_frame[['Problem', 'Penalty', 'LB', 'MIP_COST', 'MIP_TIME', 'TEAMS_COST', 'TEAMS_Time', 'WINDOWS_COST', 'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
print(tabulate.tabulate(
data_frame[['Problem', 'MIP_GAP', 'MIP_DELTA', 'MIP_TIME', 'TEAMS_GAP', 'TEAMS_DELTA', 'TEAMS_Time', 'WINDOWS_GAP', 'WINDOWS_DELTA',
'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
@functools.total_ordering
class ProblemMetadata:
WINDOW_LABELS = ['', 'F', 'S', 'M', 'L', 'A']
def __init__(self, case: int, visits: int, windows: int):
assert visits == 20 or visits == 50 or visits == 80
assert 0 <= windows < len(ProblemMetadata.WINDOW_LABELS)
self.__case = case
self.__visits = visits
self.__windows = windows
def __eq__(self, other) -> bool:
if isinstance(other, ProblemMetadata):
return self.case == other.case and self.visits == other.visits and self.__windows == other.windows
return False
    def __ne__(self, other) -> bool:
        return not (self == other)
def __lt__(self, other) -> bool:
assert isinstance(other, ProblemMetadata)
if self.windows != other.windows:
return self.windows < other.windows
if self.visits != other.visits:
return self.visits < other.visits
if self.case != other.case:
return self.case < other.case
return False
@property
def label(self) -> str:
return '{0:>2}{1}'.format(self.instance_number, self.windows_label)
@property
def windows(self) -> int:
return self.__windows
@property
def windows_label(self) -> str:
return ProblemMetadata.WINDOW_LABELS[self.__windows]
@property
def visits(self) -> int:
return self.__visits
@property
def case(self) -> int:
return self.__case
@property
def instance_number(self) -> int:
if self.__visits == 20:
return self.__case
if self.__visits == 50:
return 5 + self.__case
return 8 + self.__case
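    # Instance numbering: 20-visit cases map to 1-5, 50-visit cases to 6-8 and 80-visit cases to
    # 9-10, matching the benchmark nicknames used below (e.g. '6N', '10S').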
def compare_literature_table(args, settings):
LIU2019 = 'liu2019'
AFIFI2016 = 'afifi2016'
DECERLE2018 = 'decerle2018'
GAYRAUD2015 = 'gayraud2015'
PARRAGH2018 = 'parragh2018'
BREDSTROM2008 = 'bredstrom2008combined'
BREDSTROM2007 = 'bredstrom2007branchandprice'
InstanceConfig = collections.namedtuple('InstanceConfig', ['name', 'nickname', 'result', 'who', 'is_optimal'])
instance_data = [
InstanceConfig(name='case_1_20_4_2_1', nickname='1N', result=5.13, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_1', nickname='2N', result=4.98, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_1', nickname='3N', result=5.19, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_1', nickname='4N', result=7.21, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_1', nickname='5N', result=5.37, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_1', nickname='6N', result=14.45, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_1', nickname='7N', result=13.02, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_1', nickname='8N', result=34.94, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_1', nickname='9N', result=43.48, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_2_80_16_8_1', nickname='10N', result=12.08, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_20_4_2_2', nickname='1S', result=3.55, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_2', nickname='2S', result=4.27, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_2', nickname='3S', result=3.63, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_2', nickname='4S', result=6.14, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_2', nickname='5S', result=3.93, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_2', nickname='6S', result=8.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_2', nickname='7S', result=8.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_2', nickname='8S', result=9.54, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_2', nickname='9S', result=11.93, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_2', nickname='10S', result=8.54, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_3', nickname='1M', result=3.55, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_3', nickname='2M', result=3.58, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_3', nickname='3M', result=3.33, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_3', nickname='4M', result=5.67, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_3', nickname='5M', result=3.53, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_3', nickname='6M', result=7.7, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_3', nickname='7M', result=7.48, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_3', nickname='8M', result=8.54, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_3', nickname='9M', result=10.92, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_3', nickname='10M', result=7.62, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_4', nickname='1L', result=3.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_4', nickname='2L', result=3.42, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_4', nickname='3L', result=3.29, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_4', nickname='4L', result=5.13, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_4', nickname='5L', result=3.34, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_4', nickname='6L', result=7.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_4', nickname='7L', result=6.88, who=BREDSTROM2007, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_4', nickname='8L', result=8, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_4', nickname='9L', result=10.43, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_4', nickname='10L', result=7.36, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_5', nickname='1H', result=2.95, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_20_4_2_5', nickname='2H', result=2.88, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_20_4_2_5', nickname='3H', result=2.74, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_4_20_4_2_5', nickname='4H', result=4.29, who=GAYRAUD2015, is_optimal=False),
InstanceConfig(name='case_5_20_4_2_5', nickname='5H', result=2.81, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_50_10_5_5', nickname='6H', result=6.48, who=DECERLE2018, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_5', nickname='7H', result=5.71, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_5', nickname='8H', result=6.52, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_5', nickname='9H', result=8.51, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_5', nickname='10H', result=6.31, who=PARRAGH2018, is_optimal=False)
]
instance_dirs = ['/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case20',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case50',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case80']
instance_dict = {instance.name: instance for instance in instance_data}
print_data = []
instance_pattern = re.compile(r'case_(?P<case>\d+)_(?P<visits>\d+)_(?P<carers>\d+)_(?P<synchronized_visits>\d+)_(?P<windows>\d+)')
instance_counter = 1
last_visits = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
for instance_dir in instance_dirs:
for instance in instance_data:
instance_log_path = os.path.join(instance_dir, instance.name + '.dat.err.log')
if not os.path.exists(instance_log_path):
continue
solver_logs = read_traces(instance_log_path)
if not solver_logs:
continue
instance = instance_dict[instance.name]
name_match = instance_pattern.match(instance.name)
if not name_match:
continue
first_solver_logs = solver_logs[0]
case = int(name_match.group('case'))
visits = int(name_match.group('visits'))
carers = int(name_match.group('carers'))
synchronized_visits = int(name_match.group('synchronized_visits'))
windows_configuration = int(name_match.group('windows'))
problem_meta = ProblemMetadata(case, visits, windows_configuration)
if last_visits and last_visits != visits:
instance_counter = 1
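                # a best cost of 100 or more is assumed to mean that no feasible solution was
                # found, so the result is reported as infinity; delta is the relative
                # improvement over the literature result in percent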
normalized_result = float('inf')
if first_solver_logs.best_cost(3) < 100:
normalized_result = round(first_solver_logs.best_cost(3), 2)
delta = round((instance.result - normalized_result) / instance.result * 100, 2)
printable_literature_result = str(instance.result)
if instance.is_optimal:
printable_literature_result += '*'
printable_literature_result += 'cite{{{0}}}'.format(instance.who)
print_data.append(collections.OrderedDict(
metadata=problem_meta,
problem=problem_meta.label,
case=instance_counter,
v1=visits - 2 * synchronized_visits,
v2=synchronized_visits,
carers=carers,
time_windows=problem_meta.windows_label,
literature_result=printable_literature_result,
result=normalized_result,
delta=delta,
time=round(first_solver_logs.best_cost_time(3).total_seconds(), 2) if normalized_result != float('inf') else float('inf')
))
last_visits = visits
instance_counter += 1
print_data.sort(key=lambda dict_obj: dict_obj['metadata'])
print(tabulate.tabulate(
pandas.DataFrame(data=print_data)[['problem', 'carers', 'v1', 'v2', 'literature_result', 'result', 'time', 'delta']],
showindex=False,
tablefmt='latex', headers='keys'))
def compare_planner_optimizer_quality(args, settings):
data_file = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file)
figsize = (2.5, 5)
labels = ['Planners', 'Algorithm']
data_frame['travel_time'] = data_frame['Travel Time'].apply(parse_pandas_duration)
data_frame['span'] = data_frame['Span'].apply(parse_pandas_duration)
data_frame['overtime'] = data_frame['Overtime'].apply(parse_pandas_duration)
data_frame_planners = data_frame[data_frame['Type'] == 'Planners']
data_frame_solver = data_frame[data_frame['Type'] == 'Solver']
overtime_per_carer = [list((data_frame_planners['overtime'] / data_frame_planners['Carers']).values),
list((data_frame_solver['overtime'] / data_frame_solver['Carers']).values)]
def to_matplotlib_minutes(value):
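        # minutes expressed in nanoseconds, the unit in which the plotted timedelta values live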
return value * 60 * 1000000000
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(overtime_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Overtime per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(10), to_matplotlib_minutes(20), to_matplotlib_minutes(30)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_overtime')
travel_time_per_carer = [list((data_frame_planners['travel_time'] / data_frame_planners['Carers']).values),
list((data_frame_solver['travel_time'] / data_frame_solver['Carers']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(travel_time_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Travel Time per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(30), to_matplotlib_minutes(60),
to_matplotlib_minutes(90), to_matplotlib_minutes(120)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_travel_time')
span_per_client = [list((data_frame_planners['span'] / data_frame_planners['Clients']).values),
list((data_frame_solver['span'] / data_frame_solver['Clients']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(span_per_client, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Visit Span per Client [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(6 * 60), to_matplotlib_minutes(7 * 60), to_matplotlib_minutes(8 * 60),
to_matplotlib_minutes(9 * 60)])
ax.set_ylim(bottom=6 * 60 * 60 * 1000000000)
fig.tight_layout()
rows.plot.save_figure('quality_span')
teams = [list(data_frame_planners['Teams'].values), list(data_frame_solver['Teams'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(teams, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Teams of 2 Carers')
fig.tight_layout()
rows.plot.save_figure('quality_teams')
better_matching = [list(data_frame_planners['Better Matching'].values),
list(data_frame_solver['Better Matching'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(better_matching, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Better Client-Carer Matching')
fig.tight_layout()
rows.plot.save_figure('quality_matching')
def parse_percent(value):
value_to_use = value.replace('%', '')
return float(value_to_use) / 100.0
def parse_duration_seconds(value):
return datetime.timedelta(seconds=value)
def compare_benchmark(args, settings):
data_file_path = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file_path)
data_frame['relative_cost_difference'] = data_frame['Relative Cost Difference'].apply(parse_percent)
data_frame['relative_gap'] = data_frame['Relative Gap'].apply(parse_percent)
data_frame['time'] = data_frame['Time'].apply(parse_duration_seconds)
matplotlib.rcParams.update({'font.size': 18})
labels = ['MS', 'IP']
low_labels = ['Gap', 'Delta', 'Time']
cp_frame = data_frame[data_frame['Solver'] == 'CP']
mip_frame = data_frame[data_frame['Solver'] == 'MIP']
def get_series(frame, configuration):
num_visits, num_visits_of_2 = configuration
filtered_frame = frame[(frame['Visits'] == num_visits) & (frame['Synchronized Visits'] == num_visits_of_2)]
return [filtered_frame['relative_gap'].values, filtered_frame['relative_cost_difference'].values,
filtered_frame['time'].values]
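    # tick helpers: express seconds, minutes and hours in nanoseconds so they line up with
    # the timedelta values shown on the computation-time axis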
def seconds(value):
return value * 1000000000
def minutes(value):
return 60 * seconds(value)
def hours(value):
return 3600 * seconds(value)
limit_configurations = [[[None, minutes(1) + seconds(15)], [0, minutes(9)]],
[[None, minutes(1) + seconds(30)], [0, hours(4) + minutes(30)]],
[[0, minutes(3) + seconds(30)], [0, hours(4) + minutes(30)]],
[[0, minutes(3) + seconds(30)], [0, hours(4) + minutes(30)]]]
yticks_configurations = [
[[0, seconds(15), seconds(30), seconds(45), minutes(1)], [0, minutes(1), minutes(2), minutes(4), minutes(8)]],
[[0, seconds(15), seconds(30), seconds(45), minutes(1), minutes(1) + seconds(15)],
[0, hours(1), hours(2), hours(3), hours(4)]],
[[0, minutes(1), minutes(2), minutes(3)], [0, hours(1), hours(2), hours(3), hours(4)]],
[[0, minutes(1), minutes(2), minutes(3)], [0, hours(1), hours(2), hours(3), hours(4)]]]
problem_configurations = [(25, 0), (25, 5), (50, 0), (50, 10)]
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
return '{0:01d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
    def format_percent(x, pos=None):
return int(x * 100.0)
for index, problem_config in enumerate(problem_configurations):
fig, axes = matplotlib.pyplot.subplots(1, 2)
cp_gap, cp_delta, cp_time = get_series(cp_frame, problem_config)
mip_gap, mip_delta, mip_time = get_series(mip_frame, problem_config)
cp_time_limit, mip_time_limit = limit_configurations[index]
cp_yticks, mip_yticks = yticks_configurations[index]
cp_ax, mip_ax = axes
first_color_config = dict(flierprops=dict(marker='.'),
medianprops=dict(color=FOREGROUND_COLOR),
boxprops=dict(color=FOREGROUND_COLOR),
whiskerprops=dict(color=FOREGROUND_COLOR),
capprops=dict(color=FOREGROUND_COLOR))
second_color_config = dict(flierprops=dict(marker='.'),
medianprops=dict(color=FOREGROUND_COLOR2),
boxprops=dict(color=FOREGROUND_COLOR2),
whiskerprops=dict(color=FOREGROUND_COLOR2),
capprops=dict(color=FOREGROUND_COLOR2))
cp_ax.boxplot([cp_gap, cp_delta, []], **second_color_config)
cp_twinx = cp_ax.twinx()
cp_twinx.boxplot([[], [], cp_time], **first_color_config)
cp_twinx.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
cp_ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_percent))
cp_twinx.tick_params(axis='y', labelcolor=FOREGROUND_COLOR)
cp_ax.set_xlabel('Multistage')
cp_ax.set_xticklabels(low_labels, rotation=45)
cp_ax.set_ylim(bottom=-0.05, top=1)
cp_ax.set_ylabel('Delta, Gap [%]')
cp_twinx.set_ylim(bottom=cp_time_limit[0], top=cp_time_limit[1])
if cp_yticks:
cp_twinx.set_yticks(cp_yticks)
mip_ax.boxplot([mip_gap, mip_delta, []], **second_color_config)
mip_twinx = mip_ax.twinx()
mip_twinx.boxplot([[], [], mip_time], **first_color_config)
mip_twinx.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
mip_twinx.tick_params(axis='y', labelcolor=FOREGROUND_COLOR)
mip_ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_percent))
mip_ax.set_xlabel('IP')
mip_ax.set_xticklabels(low_labels, rotation=45)
mip_ax.set_ylim(bottom=-0.05, top=1)
mip_twinx.set_ylabel('Computation Time [H:MM:SS]', color=FOREGROUND_COLOR)
mip_twinx.set_ylim(bottom=mip_time_limit[0], top=mip_time_limit[1])
if mip_yticks:
mip_twinx.set_yticks(mip_yticks)
fig.tight_layout(w_pad=0.0)
rows.plot.save_figure('benchmark_boxplot_{0}_{1}'.format(problem_config[0], problem_config[1]))
matplotlib.pyplot.cla()
matplotlib.pyplot.close(fig)
def old_debug(args, settings):
problem = rows.plot.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
solution_file = get_or_raise(args, __SOLUTION_FILE_ARG)
schedule = rows.plot.load_schedule(solution_file)
schedule_date = schedule.metadata.begin
    carer_diaries = {
carer_shift.carer.sap_number:
next((diary for diary in carer_shift.diaries if diary.date == schedule_date), None)
for carer_shift in problem.carers}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
data_set = []
with rows.plot.create_routing_session() as session:
for route in schedule.routes():
travel_time = datetime.timedelta()
for source, destination in route.edges():
source_loc = location_finder.find(source.visit.service_user)
if not source_loc:
logging.error('Failed to resolve location of %s', source.visit.service_user)
continue
destination_loc = location_finder.find(destination.visit.service_user)
if not destination_loc:
logging.error('Failed to resolve location of %s', destination.visit.service_user)
continue
distance = session.distance(source_loc, destination_loc)
if distance is None:
logging.error('Distance cannot be estimated between %s and %s', source_loc, destination_loc)
continue
travel_time += datetime.timedelta(seconds=distance)
service_time = datetime.timedelta()
for visit in route.visits:
if visit.check_in and visit.check_out:
observed_duration = visit.check_out - visit.check_in
if observed_duration.days < 0:
logging.error('Observed duration %s is negative', observed_duration)
service_time += observed_duration
else:
logging.warning(
                        'Visit %s is missing check-in and check-out information',
visit.key)
service_time += visit.duration
available_time = functools.reduce(operator.add, (event.duration
for event in
                                                         carer_diaries[route.carer.sap_number].events))
data_set.append([route.carer.sap_number,
available_time,
service_time,
travel_time,
float(service_time.total_seconds() + travel_time.total_seconds())
/ available_time.total_seconds()])
data_set.sort(key=operator.itemgetter(4))
data_frame = pandas.DataFrame(columns=['Carer', 'Availability', 'Service', 'Travel', 'Usage'], data=data_set)
figure, axis = matplotlib.pyplot.subplots()
indices = numpy.arange(len(data_frame.index))
time_delta_converter = rows.plot.TimeDeltaConverter()
width = 0.35
travel_series = numpy.array(time_delta_converter(data_frame.Travel))
service_series = numpy.array(time_delta_converter(data_frame.Service))
idle_overtime_series = list(data_frame.Availability - data_frame.Travel - data_frame.Service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(), idle_overtime_series)))
service_handle = axis.bar(indices, service_series, width, bottom=time_delta_converter.zero)
travel_handle = axis.bar(indices, travel_series, width,
bottom=service_series + time_delta_converter.zero_num)
idle_handle = axis.bar(indices, idle_series, width,
bottom=service_series + travel_series + time_delta_converter.zero_num)
overtime_handle = axis.bar(indices, overtime_series, width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend((travel_handle, service_handle, idle_handle, overtime_handle),
('Travel', 'Service', 'Idle', 'Overtime'), loc='upper right')
matplotlib.pyplot.show()
def show_working_hours(args, settings):
__WIDTH = 0.25
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
shift_file = get_or_raise(args, __FILE_ARG)
shift_file_base_name, shift_file_ext = os.path.splitext(os.path.basename(shift_file))
output_file_base_name = getattr(args, __OUTPUT, shift_file_base_name)
__EVENT_TYPE_OFFSET = {'assumed': 2, 'contract': 1, 'work': 0}
__EVENT_TYPE_COLOR = {'assumed': color_map.colors[0], 'contract': color_map.colors[4], 'work': color_map.colors[2]}
handles = {}
frame = pandas.read_csv(shift_file)
dates = frame['day'].unique()
for current_date in dates:
frame_to_use = frame[frame['day'] == current_date]
carers = frame_to_use['carer'].unique()
figure, axis = matplotlib.pyplot.subplots()
try:
current_date_to_use = datetime.datetime.strptime(current_date, '%Y-%m-%d')
carer_index = 0
for carer in carers:
carer_frame = frame_to_use[frame_to_use['carer'] == carer]
axis.bar(carer_index + 0.25, 24 * 3600, 0.75, bottom=0, color='grey', alpha=0.3)
for index, row in carer_frame.iterrows():
event_begin = datetime.datetime.strptime(row['begin'], '%Y-%m-%d %H:%M:%S')
event_end = datetime.datetime.strptime(row['end'], '%Y-%m-%d %H:%M:%S')
handle = axis.bar(carer_index + __EVENT_TYPE_OFFSET[row['event type']] * __WIDTH,
(event_end - event_begin).total_seconds(),
__WIDTH,
bottom=(event_begin - current_date_to_use).total_seconds(),
color=__EVENT_TYPE_COLOR[row['event type']])
handles[row['event type']] = handle
carer_index += 1
axis.legend([handles['work'], handles['contract'], handles['assumed']],
['Worked', 'Available', 'Forecast'], loc='upper right')
axis.grid(linestyle='dashed')
axis.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_time))
axis.yaxis.set_ticks(numpy.arange(0, 24 * 3600, 2 * 3600))
axis.set_ylim(6 * 3600, 24 * 60 * 60)
rows.plot.save_figure(output_file_base_name + '_' + current_date)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compute_overtime(frame):
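    # overtime per carer: the amount by which service plus travel time exceeds availability;
    # routes that fit within the availability contribute a zero timedelta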
idle_overtime_series = list(frame.Availability - frame.Travel - frame.Service)
idle_series = numpy.array(
list(map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(list(map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(), idle_overtime_series)))
return overtime_series
class Node:
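    # A single stop in a carer's route: the visit with its earliest and latest start times,
    # the start and duration of any break recorded at this stop, and the travel time to the
    # next node (next == -1 marks the last node of a route).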
def __init__(self,
index: int,
next: int,
visit: rows.model.visit.Visit,
visit_start_min: datetime.datetime,
visit_start_max: datetime.datetime,
break_start: typing.Optional[datetime.datetime],
break_duration: datetime.timedelta,
travel_duration: datetime.timedelta):
self.__index = index
self.__next = next
self.__visit = visit
self.__visit_start_min = visit_start_min
self.__visit_start_max = visit_start_max
self.__break_start = break_start
self.__break_duration = break_duration
self.__travel_duration = travel_duration
@property
def index(self) -> int:
return self.__index
@property
def next(self) -> int:
return self.__next
@property
def visit_key(self) -> int:
return self.__visit.key
@property
def visit_start(self) -> datetime.datetime:
return datetime.datetime.combine(self.__visit.date, self.__visit.time)
@property
def visit_start_min(self) -> datetime.datetime:
return self.__visit_start_min
@property
def visit_start_max(self) -> datetime.datetime:
return self.__visit_start_max
@property
def carer_count(self) -> int:
return self.__visit.carer_count
@property
def visit_duration(self) -> datetime.timedelta:
return self.__visit.duration
@property
def break_start(self) -> datetime.datetime:
return self.__break_start
@property
def break_duration(self) -> datetime.timedelta:
return self.__break_duration
@property
def travel_duration(self) -> datetime.timedelta:
return self.__travel_duration
@property
def service_user(self) -> str:
return self.__visit.service_user
class Mapping:
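    # Indexes every scheduled visit as a Node, rebuilds per-carer routes from the schedule
    # and pairs up visits that require more than one carer (siblings) so that delays can be
    # propagated between the routes sharing them.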
def __init__(self, routes, problem, settings, time_window_span):
self.__index_to_node = {}
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
local_routes = {}
current_index = 0
def find_visit(item) -> rows.model.visit.Visit:
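            # match a past visit back to its planned counterpart: prefer an exact key match,
            # otherwise take the planned visit for the same user, date and tasks whose start
            # time is closest and lies within the configured time window span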
current_diff = sys.maxsize
visit_match = None
for visit_batch in problem.visits:
if visit_batch.service_user != item.service_user:
continue
for visit in visit_batch.visits:
if visit.date != item.date or visit.tasks != item.tasks:
continue
if item.key == visit.key:
# exact match
return visit
visit_total_time = visit.time.hour * 3600 + visit.time.minute * 60
item_total_time = item.time.hour * 3600 + item.time.minute * 60
diff_total_time = abs(visit_total_time - item_total_time)
if diff_total_time <= time_window_span.total_seconds() and diff_total_time < current_diff:
visit_match = visit
current_diff = diff_total_time
assert visit_match is not None
return visit_match
current_index = 0
with rows.plot.create_routing_session() as routing_session:
for route in routes:
local_route = []
previous_visit = None
previous_index = None
current_visit = None
break_start = None
break_duration = datetime.timedelta()
for item in route.nodes:
if isinstance(item, rows.model.past_visit.PastVisit):
current_visit = item.visit
if previous_visit is None:
if break_start is None:
diary = problem.get_diary(route.carer, current_visit.date)
break_start = diary.events[0].begin - datetime.timedelta(minutes=30)
node = Node(current_index,
current_index + 1,
rows.model.visit.Visit(date=current_visit.date,
time=break_start,
duration=datetime.timedelta(),
service_user=current_visit.service_user),
break_start,
break_start,
break_start,
break_duration,
datetime.timedelta())
self.__index_to_node[current_index] = node
local_route.append(node)
current_index += 1
previous_visit = current_visit
previous_index = current_index
break_start = None
break_duration = datetime.timedelta()
current_index += 1
continue
previous_location = user_tag_finder.find(previous_visit.service_user)
current_location = user_tag_finder.find(current_visit.service_user)
travel_time = datetime.timedelta(seconds=routing_session.distance(previous_location, current_location))
previous_visit_match = find_visit(previous_visit)
node = Node(previous_index,
current_index,
previous_visit,
previous_visit_match.datetime - time_window_span,
previous_visit_match.datetime + time_window_span,
break_start,
break_duration,
travel_time)
self.__index_to_node[previous_index] = node
local_route.append(node)
break_start = None
break_duration = datetime.timedelta()
previous_visit = current_visit
previous_index = current_index
current_index += 1
if isinstance(item, rows.model.rest.Rest):
if break_start is None:
break_start = item.start_time
else:
break_start = item.start_time - break_duration
break_duration += item.duration
visit_match = find_visit(previous_visit)
node = Node(previous_index,
-1,
previous_visit,
visit_match.datetime - time_window_span,
visit_match.datetime + time_window_span,
break_start,
break_duration,
datetime.timedelta())
self.__index_to_node[previous_index] = node
local_route.append(node)
local_routes[route.carer] = local_route
self.__routes = local_routes
service_user_to_index = collections.defaultdict(list)
for index in self.__index_to_node:
node = self.__index_to_node[index]
service_user_to_index[node.service_user].append(index)
self.__siblings = {}
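        # two multi-carer visits of the same service user with identical time windows are
        # treated as siblings, presumably the two legs of one synchronised visit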
for service_user in service_user_to_index:
num_indices = len(service_user_to_index[service_user])
for left_pos in range(num_indices):
left_index = service_user_to_index[service_user][left_pos]
left_visit = self.__index_to_node[left_index]
if left_visit.carer_count == 1:
continue
for right_pos in range(left_pos + 1, num_indices):
right_index = service_user_to_index[service_user][right_pos]
right_visit = self.__index_to_node[right_index]
if right_visit.carer_count == 1:
continue
if left_visit.visit_start_min == right_visit.visit_start_min and left_visit.visit_start_max == right_visit.visit_start_max:
assert left_index != right_index
self.__siblings[left_index] = right_index
self.__siblings[right_index] = left_index
def indices(self):
return list(self.__index_to_node.keys())
def routes(self) -> typing.Dict[rows.model.carer.Carer, typing.List[Node]]:
return self.__routes
def node(self, index: int) -> Node:
return self.__index_to_node[index]
def find_index(self, visit_key: int) -> int:
for index in self.__index_to_node:
if self.__index_to_node[index].visit_key == visit_key:
return index
return None
def sibling(self, index: int) -> typing.Optional[Node]:
if index in self.__siblings:
sibling_index = self.__siblings[index]
return self.__index_to_node[sibling_index]
return None
def graph(self) -> networkx.DiGraph:
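        # precedence graph used to propagate start times: an edge for every consecutive pair
        # of nodes in a route, plus edges linking sibling nodes so that delaying one leg of a
        # synchronised visit also delays the other carer's route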
edges = []
for carer in self.__routes:
for node in self.__routes[carer]:
if node.next != -1:
assert node.index != node.next
edges.append([node.index, node.next])
sibling_node = self.sibling(node.index)
if sibling_node is not None:
if node.index < sibling_node.index:
assert node.index != sibling_node.index
edges.append([node.index, sibling_node.index])
if node.next != -1:
assert sibling_node.index != node.next
edges.append([sibling_node.index, node.next])
return networkx.DiGraph(edges)
def create_mapping(settings, problem, schedule) -> Mapping:
mapping_time_windows_span = datetime.timedelta(minutes=90)
return Mapping(schedule.routes, problem, settings, mapping_time_windows_span)
class StartTimeEvaluator:
def __init__(self, mapping: Mapping, problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
self.__mapping = mapping
self.__problem = problem
self.__schedule = schedule
self.__sorted_indices = list(networkx.topological_sort(self.__mapping.graph()))
self.__initial_start_times = self.__get_initial_start_times()
def get_start_times(self, duration_callback) -> typing.List[datetime.datetime]:
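        # walk the precedence graph in topological order: sibling visits are aligned to start
        # at the same time, and every successor starts no earlier than the arrival time implied
        # by its predecessor's duration (from duration_callback), travel time and breaks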
start_times = copy.copy(self.__initial_start_times)
for index in self.__sorted_indices:
node = self.__mapping.node(index)
current_sibling_node = self.__mapping.sibling(node.index)
if current_sibling_node:
max_start_time = max(start_times[node.index], start_times[current_sibling_node.index])
start_times[node.index] = max_start_time
if max_start_time > start_times[current_sibling_node.index]:
start_times[current_sibling_node.index] = max_start_time
if current_sibling_node.next is not None and current_sibling_node.next != -1:
start_times[current_sibling_node.next] = self.__get_next_arrival(current_sibling_node, start_times, duration_callback)
if node.next is None or node.next == -1:
continue
next_arrival = self.__get_next_arrival(node, start_times, duration_callback)
if next_arrival > start_times[node.next]:
start_times[node.next] = next_arrival
return start_times
def get_delays(self, start_times: typing.List[datetime.datetime]) -> typing.List[datetime.timedelta]:
return [start_times[index] - self.__mapping.node(index).visit_start_max for index in self.__mapping.indices()]
def __get_next_arrival(self, local_node: Node, start_times, duration_callback) -> datetime.datetime:
break_done = False
if local_node.break_duration is not None \
and local_node.break_start is not None \
and local_node.break_start + local_node.break_duration <= start_times[local_node.index]:
break_done = True
local_visit_key = self.__mapping.node(local_node.index).visit_key
local_next_arrival = start_times[local_node.index] + duration_callback(local_visit_key) + local_node.travel_duration
if not break_done and local_node.break_start is not None:
if local_next_arrival >= local_node.break_start:
local_next_arrival += local_node.break_duration
else:
local_next_arrival = local_node.break_start + local_node.break_duration
return local_next_arrival
def __get_initial_start_times(self) -> typing.List[datetime.datetime]:
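        # lower bound on every start time: the opening of the visit's time window, but no
        # earlier than 30 minutes before the carer's first diary event of the day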
start_times = [self.__mapping.node(index).visit_start_min for index in self.__mapping.indices()]
carer_routes = self.__mapping.routes()
for carer in carer_routes:
diary = self.__problem.get_diary(carer, self.__schedule.date)
assert diary is not None
nodes = carer_routes[carer]
nodes_it = iter(nodes)
first_visit_node = next(nodes_it)
start_min = max(first_visit_node.visit_start_min, diary.events[0].begin - datetime.timedelta(minutes=30))
start_times[first_visit_node.index] = start_min
for node in nodes_it:
start_min = max(node.visit_start_min, diary.events[0].begin - datetime.timedelta(minutes=30))
start_times[node.index] = start_min
return start_times
class EssentialRiskinessEvaluator:
def __init__(self, settings, history, problem, schedule):
self.__settings = settings
self.__history = history
self.__problem = problem
self.__schedule = schedule
self.__schedule_start = datetime.datetime.combine(self.__schedule.date, datetime.time())
self.__mapping = None
self.__sample = None
self.__start_times = None
self.__delay = None
def run(self):
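        # scenario-based evaluation: sample visit durations from the history, then replay the
        # schedule for every scenario to obtain per-visit start times and delays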
self.__mapping = create_mapping(self.__settings, self.__problem, self.__schedule)
history_time_windows_span = datetime.timedelta(hours=2)
self.__sample = self.__history.build_sample(self.__problem, self.__schedule.date, history_time_windows_span)
self.__start_times = [[datetime.datetime.max for _ in range(self.__sample.size)] for _ in self.__mapping.indices()]
self.__delay = [[datetime.timedelta.max for _ in range(self.__sample.size)] for _ in self.__mapping.indices()]
start_time_evaluator = StartTimeEvaluator(self.__mapping, self.__problem, self.__schedule)
for scenario in range(self.__sample.size):
def get_visit_duration(visit_key: int) -> datetime.timedelta:
if visit_key is None:
return datetime.timedelta()
return self.__sample.visit_duration(visit_key, scenario)
scenario_start_times = start_time_evaluator.get_start_times(get_visit_duration)
delay = start_time_evaluator.get_delays(scenario_start_times)
for index in range(len(scenario_start_times)):
self.__start_times[index][scenario] = scenario_start_times[index]
self.__delay[index][scenario] = delay[index]
def calculate_index(self, visit_key: int) -> float:
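        # riskiness index of a visit over the sampled delays: the smallest alpha >= 0 such that
        # the sum over scenarios of max(delay, -alpha) is not positive; 0 if the visit is never
        # delayed and infinity if the positive delays cannot be compensated at all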
visit_index = self.__find_index(visit_key)
records = [local_delay.total_seconds() for local_delay in self.__delay[visit_index]]
records.sort()
num_records = len(records)
if records[num_records - 1] <= 0:
return 0.0
total_delay = 0.0
position = num_records - 1
while position >= 0 and records[position] >= 0:
total_delay += records[position]
position -= 1
if position == -1:
return float('inf')
delay_budget = 0
while position > 0 and delay_budget + float(position + 1) * records[position] + total_delay > 0:
delay_budget += records[position]
position -= 1
delay_balance = delay_budget + float(position + 1) * records[position] + total_delay
if delay_balance < 0:
riskiness_index = min(0.0, records[position + 1])
assert riskiness_index <= 0.0
remaining_balance = total_delay + delay_budget + float(position + 1) * riskiness_index
assert remaining_balance >= 0.0
riskiness_index -= math.ceil(remaining_balance / float(position + 1))
assert riskiness_index * float(position + 1) + delay_budget + total_delay <= 0.0
return -riskiness_index
elif delay_balance > 0:
return float('inf')
else:
return records[position]
def get_delays(self, visit_key) -> typing.List[datetime.timedelta]:
index = self.__find_index(visit_key)
return self.__delay[index]
def find_carer(self, visit_key: int) -> typing.Optional[rows.model.carer.Carer]:
for carer in self.__mapping.routes():
for node in self.__mapping.routes()[carer]:
if node.visit_key == visit_key:
return carer
return None
def find_route(self, index: int) -> typing.Optional[typing.List[Node]]:
routes = self.__mapping.routes()
for carer in routes:
for node in routes[carer]:
if node.index == index:
return routes[carer]
return None
def print_route_for_visit(self, visit_key):
carer = self.find_carer(visit_key)
self.print_route(carer)
def print_route(self, carer):
route = self.__mapping.routes()[carer]
data = [['index', 'key', 'visit_start', 'visit_duration', 'travel_duration', 'break_start', 'break_duration']]
for node in route:
if node.visit_key is None:
duration = 0
else:
duration = int(self.__sample.visit_duration(node.visit_key, 0).total_seconds())
data.append([node.index,
node.visit_key,
int(self.__datetime_to_delta(self.__start_times[node.index][0]).total_seconds()),
duration,
int(node.travel_duration.total_seconds()),
int(self.__datetime_to_delta(node.break_start).total_seconds()) if node.break_start is not None else 0,
int(node.break_duration.total_seconds())])
print(tabulate.tabulate(data))
def print_start_times(self, visit_key: int):
print('Start Times - Visit {0}:'.format(visit_key))
selected_index = self.__find_index(visit_key)
for scenario_number in range(self.__sample.size):
print('{0:<4}{1}'.format(scenario_number,
int(self.__datetime_to_delta(self.__start_times[selected_index][scenario_number]).total_seconds())))
def print_delays(self, visit_key: int):
print('Delays - Visit {0}:'.format(visit_key))
selected_index = self.__find_index(visit_key)
for scenario_number in range(self.__sample.size):
print('{0:<4}{1}'.format(scenario_number, int(self.__delay[selected_index][scenario_number].total_seconds())))
def visit_keys(self) -> typing.List[int]:
visit_keys = [self.__mapping.node(index).visit_key for index in self.__mapping.indices() if self.__mapping.node(index).visit_key is not None]
visit_keys.sort()
return visit_keys
def __find_index(self, visit_key: int) -> typing.Optional[int]:
for index in self.__mapping.indices():
if self.__mapping.node(index).visit_key == visit_key:
return index
return None
def __datetime_to_delta(self, value: datetime.datetime) -> datetime.timedelta:
return value - self.__schedule_start
def to_frame(self):
records = []
for visit_index in self.__mapping.indices():
visit_key = self.__mapping.node(visit_index).visit_key
if visit_key is None:
continue
for scenario_number in range(self.__sample.size):
records.append({'visit': visit_key, 'scenario': scenario_number, 'delay': self.__delay[visit_index][scenario_number]})
return pandas.DataFrame(data=records)
@property
def mapping(self):
return self.__mapping
@property
def start_times(self):
return self.__start_times
@property
def delay(self):
return self.__delay
@staticmethod
def time_to_delta(time: datetime.time) -> datetime.timedelta:
seconds = time.hour * 3600 + time.minute * 60 + time.second
return datetime.timedelta(seconds=seconds)
def load_history():
root_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems'
cached_path = os.path.join(root_dir, 'C350_history.pickle')
path = os.path.join(root_dir, 'C350_history.json')
import pickle
if os.path.exists(cached_path):
with open(cached_path, 'rb') as input_stream:
return pickle.load(input_stream)
with open(path, 'r') as input_stream:
history = rows.model.history.History.load(input_stream)
with open(cached_path, 'wb') as output_stream:
pickle.dump(history, output_stream)
return history
def compare_delay(args, settings):
compare_delay_visits_path = 'compare_delay_visits.hdf'
compare_instances_path = 'compare_instances.hdf'
def load_data():
root_problem_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations/solutions'
problem = rows.load.load_problem('/home/pmateusz/dev/cordia/simulations/current_review_simulations/problems/C350_past.json')
history = load_history()
cost_schedules \
= [rows.load.load_schedule(os.path.join(root_problem_dir, 'c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15)]
cost_traces = read_traces(os.path.join(root_problem_dir, 'c350past_distv90b90e30m1m1m5.err.log'))
risk_schedules = [rows.load.load_schedule(os.path.join(root_problem_dir, 'c350past_riskv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15)]
risk_traces = read_traces(os.path.join(root_problem_dir, 'c350past_riskv90b90e30m1m1m5.err.log'))
return problem, history, cost_schedules, cost_traces, risk_schedules, risk_traces
def get_visit_duration(visit_key: int) -> datetime.timedelta:
if visit_key is None:
return datetime.timedelta()
visit = history.get_visit(visit_key)
assert visit is not None
return visit.real_duration
def get_visit_delays(schedule: rows.model.schedule.Schedule) -> typing.Dict[int, datetime.timedelta]:
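        # delay of every visit in the schedule when each visit takes its recorded real duration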
mapping = create_mapping(settings, problem, schedule)
delay_evaluator = StartTimeEvaluator(mapping, problem, schedule)
start_times = delay_evaluator.get_start_times(get_visit_duration)
delays = delay_evaluator.get_delays(start_times)
return {mapping.node(index).visit_key: delays[index] for index in range(len(delays))}
problem = None
if os.path.exists(compare_delay_visits_path):
visits_frame = pandas.read_hdf(compare_delay_visits_path)
else:
problem, history, cost_schedules, cost_traces, risk_schedules, risk_traces = load_data()
visit_data_set = []
for index in range(len(cost_schedules)):
cost_schedule = cost_schedules[index]
risk_schedule = risk_schedules[index]
schedule_date = cost_schedule.date
assert schedule_date == risk_schedule.date
cost_visit_delays = get_visit_delays(cost_schedule)
risk_visit_delays = get_visit_delays(risk_schedule)
visit_keys = set(cost_visit_delays.keys())
for visit_key in risk_visit_delays:
visit_keys.add(visit_key)
for visit_key in visit_keys:
record = collections.OrderedDict(visit_key=visit_key, date=schedule_date)
if visit_key in cost_visit_delays:
record['cost_delay'] = cost_visit_delays[visit_key]
if visit_key in risk_visit_delays:
record['risk_delay'] = risk_visit_delays[visit_key]
visit_data_set.append(record)
visits_frame = pandas.DataFrame(data=visit_data_set)
visits_frame.to_hdf(compare_delay_visits_path, key='a')
if os.path.exists(compare_instances_path):
        instances_frame = pandas.read_hdf(compare_instances_path)
from sklearn.metrics import f1_score, accuracy_score
import numpy as np
from utilities.tools import load_model
import pandas as pd
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
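    # ensemble prediction for the MSRP test set: load n_models trained models, average their
    # positive-class probabilities and round the mean to obtain the final label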
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
    preds = []
    print('predicting the test data...\n')
    for m in models:
        preds_prob = m.predict([test_data_1, test_data_2, nlp_f], batch_size=64, verbose=0)
        preds.append(preds_prob[:, 1])
    preds = np.asarray(preds)
    final_labels = np.zeros(len(test_data_1), dtype=int)
    # average the predictions of all models and round the mean to the nearest label
    for i in range(len(test_data_1)):
        final_labels[i] = round(np.mean(preds[:, i]))
        if i % 100 == 0:
            print(i, ' out of ', len(test_data_1))
    print("test data accuracy: ", accuracy_score(test_labels, final_labels))
    print("test data f_measure: ", f1_score(test_labels, final_labels))
    submission = pd.DataFrame({"Quality": final_labels})
import contextlib
import json
import gzip
import io
import logging
import os.path
import pickle
import random
import shutil
import sys
import tempfile
import traceback
import unittest
import pandas
COMMON_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'common-primitives')
# NOTE: This insertion should appear before any code attempting to resolve or load primitives,
# so the git submodule version of `common-primitives` is looked at first.
sys.path.insert(0, COMMON_PRIMITIVES_DIR)
TEST_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'data', 'primitives')
sys.path.insert(0, TEST_PRIMITIVES_DIR)
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.no_split import NoSplitDatasetSplitPrimitive
from common_primitives.random_forest import RandomForestClassifierPrimitive
from common_primitives.train_score_split import TrainScoreDatasetSplitPrimitive
from test_primitives.random_classifier import RandomClassifierPrimitive
from test_primitives.fake_score import FakeScorePrimitive
from d3m import cli, index, runtime, utils
from d3m.container import dataset as dataset_module
from d3m.contrib.primitives.compute_scores import ComputeScoresPrimitive
from d3m.metadata import base as metadata_base, pipeline as pipeline_module, pipeline_run as pipeline_run_module, problem as problem_module
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
PROBLEM_DIR = os.path.join(TEST_DATA_DIR, 'problems')
DATASET_DIR = os.path.join(TEST_DATA_DIR, 'datasets')
PIPELINE_DIR = os.path.join(TEST_DATA_DIR, 'pipelines')
class TestCLIRuntime(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
@classmethod
def setUpClass(cls):
to_register = {
'd3m.primitives.data_transformation.dataset_to_dataframe.Common': DatasetToDataFramePrimitive,
'd3m.primitives.classification.random_forest.Common': RandomForestClassifierPrimitive,
'd3m.primitives.classification.random_classifier.Test': RandomClassifierPrimitive,
'd3m.primitives.data_transformation.column_parser.Common': ColumnParserPrimitive,
'd3m.primitives.data_transformation.construct_predictions.Common': ConstructPredictionsPrimitive,
'd3m.primitives.evaluation.no_split_dataset_split.Common': NoSplitDatasetSplitPrimitive,
'd3m.primitives.evaluation.compute_scores.Test': FakeScorePrimitive,
'd3m.primitives.evaluation.train_score_dataset_split.Common': TrainScoreDatasetSplitPrimitive,
# We do not have to load this primitive, but loading it here prevents the package from loading all primitives.
'd3m.primitives.evaluation.compute_scores.Core': ComputeScoresPrimitive,
}
# To hide any logging or stdout output.
with utils.silence():
for python_path, primitive in to_register.items():
index.register_primitive(python_path, primitive)
def _call_cli_runtime(self, arg):
logger = logging.getLogger('d3m.runtime')
with utils.silence():
with self.assertLogs(logger=logger) as cm:
# So that at least one message is logged.
logger.warning("Debugging.")
cli.main(arg)
# We skip our "debugging" message.
return cm.records[1:]
def _call_cli_runtime_without_fail(self, arg):
try:
return self._call_cli_runtime(arg)
except Exception as e:
self.fail(traceback.format_exc())
def _assert_valid_saved_pipeline_runs(self, pipeline_run_save_path):
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
try:
pipeline_run_module.validate_pipeline_run(pipeline_run_dict)
except Exception as e:
self.fail(traceback.format_exc())
def _validate_previous_pipeline_run_ids(self, pipeline_run_save_path):
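        # every 'previous_pipeline_run' reference must point to a pipeline run that was also
        # saved in the same pipeline run file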
ids = set()
prev_ids = set()
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
ids.add(pipeline_run_dict['id'])
if 'previous_pipeline_run' in pipeline_run_dict:
prev_ids.add(pipeline_run_dict['previous_pipeline_run']['id'])
self.assertTrue(
prev_ids.issubset(ids),
'Some previous pipeline run ids {} are not in the set of pipeline run ids {}'.format(prev_ids, ids)
)
def test_fit_multi_input(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
def test_fit_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--save',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'--output',
output_csv_path,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11225, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11225, outputs_path='output.csv')
def test_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-no-problem-pipeline')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--save',
fitted_pipeline_path,
]
self._call_cli_runtime_without_fail(arg)
arg = [
'',
'runtime',
'produce',
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--output',
output_csv_path,
'--fitted-pipeline',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-no-problem-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_fit_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--output',
output_csv_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_nonstandard_fit_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--save',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'--not-standard-pipeline',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=10710, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--save',
fitted_pipeline_path,
'--not-standard-pipeline'
]
self._call_cli_runtime_without_fail(arg)
arg = [
'',
'runtime',
'produce',
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--fitted-pipeline',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_fit_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--expose-produced-outputs',
self.test_dir,
'--not-standard-pipeline',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_fit_produce_multi_input(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
def test_fit_score(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-score',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--scores',
os.path.join(self.test_dir, 'scores.csv'),
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_fit_score_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-score',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'F1_MACRO',
'--metric',
'ACCURACY',
'--scores',
os.path.join(self.test_dir, 'scores.csv'),
'-O',
pipeline_run_save_path,
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
@staticmethod
def _get_iris_dataset_path():
return os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json')
@staticmethod
def _get_iris_problem_path():
return os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json')
@staticmethod
def _get_random_forest_pipeline_path():
return os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml')
@staticmethod
def _get_no_split_data_pipeline_path():
return os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml')
@staticmethod
def _get_train_test_split_data_pipeline_path():
return os.path.join(PIPELINE_DIR, 'data-preparation-train-test-split.yml')
def _get_pipeline_run_save_path(self):
return os.path.join(self.test_dir, 'pipeline_run.yml')
def _get_predictions_path(self):
return os.path.join(self.test_dir, 'predictions.csv')
def _get_scores_path(self):
return os.path.join(self.test_dir, 'scores.csv')
def _get_pipeline_rerun_save_path(self):
return os.path.join(self.test_dir, 'pipeline_rerun.yml')
def _get_rescores_path(self):
return os.path.join(self.test_dir, 'rescores.csv')
def _fit_iris_random_forest(
self, *, predictions_path=None, fitted_pipeline_path=None, pipeline_run_save_path=None
):
if pipeline_run_save_path is None:
pipeline_run_save_path = self._get_pipeline_run_save_path()
arg = [
'',
'runtime',
'fit',
'--input',
self._get_iris_dataset_path(),
'--problem',
self._get_iris_problem_path(),
'--pipeline',
self._get_random_forest_pipeline_path(),
'-O',
pipeline_run_save_path
]
if predictions_path is not None:
arg.append('--output')
arg.append(predictions_path)
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def _fit_iris_random_classifier_without_problem(self, *, fitted_pipeline_path):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'-O',
pipeline_run_save_path
]
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def test_fit(self):
pipeline_run_save_path = self._get_pipeline_run_save_path()
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
self._fit_iris_random_forest(
fitted_pipeline_path=fitted_pipeline_path, pipeline_run_save_path=pipeline_run_save_path
)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
self.assertTrue(os.path.isfile(pipeline_run_save_path))
def test_evaluate(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scores',
scores_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'-O',
pipeline_run_save_path
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0], ['F1_MACRO', 1.0, 1.0, 0, 0]])
def test_evaluate_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'ACCURACY',
'--scores',
scores_path,
'-O',
pipeline_run_save_path
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0]])
def test_score(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scores',
scores_path,
'--metric',
'F1_MACRO',
'--metric',
'ACCURACY',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = | pandas.read_csv(scores_path) | pandas.read_csv |
"""
Core implementation of :mod:`sklearndf.transformation.wrapper`
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.manifold import Isomap
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, PolynomialFeatures
from pytools.api import AllTracker
from ... import TransformerDF
from ...wrapper import TransformerWrapperDF
log = logging.getLogger(__name__)
__all__ = [
"BaseDimensionalityReductionWrapperDF",
"BaseMultipleInputsPerOutputTransformerWrapperDF",
"ColumnPreservingTransformerWrapperDF",
"ColumnSubsetTransformerWrapperDF",
"ComponentsDimensionalityReductionWrapperDF",
"FeatureSelectionWrapperDF",
"NComponentsDimensionalityReductionWrapperDF",
"NumpyTransformerWrapperDF",
"ColumnTransformerWrapperDF",
"IsomapWrapperDF",
"ImputerWrapperDF",
"MissingIndicatorWrapperDF",
"AdditiveChi2SamplerWrapperDF",
"KBinsDiscretizerWrapperDF",
"PolynomialFeaturesWrapperDF",
"OneHotEncoderWrapperDF",
]
#
# type variables
#
T_Transformer = TypeVar("T_Transformer", bound=TransformerMixin)
# T_Imputer is needed because sklearn's _BaseImputer only exists from v0.22 onwards.
# Once we drop support for sklearn 0.21, _BaseImputer can be used instead.
# The following TypeVar helps to annotate availability of "add_indicator" and
# "missing_values" attributes on an imputer instance for ImputerWrapperDF below
# noinspection PyProtectedMember
from sklearn.impute._iterative import IterativeImputer
T_Imputer = TypeVar("T_Imputer", SimpleImputer, IterativeImputer)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# wrapper classes for transformers
#
class NumpyTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that only accept numpy arrays.
Converts data frames to numpy arrays before handing off to the native transformer.
Implementations must define :meth:`_get_features_original`.
"""
# noinspection PyPep8Naming
def _adjust_X_type_for_delegate(
self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
) -> np.ndarray:
assert to_numpy is not False, "X must be converted to a numpy array"
return super()._adjust_X_type_for_delegate(X, to_numpy=True)
def _adjust_y_type_for_delegate(
self,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
to_numpy: Optional[bool] = None,
) -> Optional[np.ndarray]:
assert to_numpy is not False, "y must be converted to a numpy array"
return super()._adjust_y_type_for_delegate(y, to_numpy=True)
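# Illustrative sketch only (the concrete wrappers are defined further below and may
# differ): a transformer that accepts only numpy input, e.g. AdditiveChi2Sampler,
# would be wrapped by combining this mixin with a column-naming policy:
#
#     class _ExampleChi2WrapperDF(  # hypothetical name, for illustration
#         NumpyTransformerWrapperDF[AdditiveChi2Sampler]
#     ):
#         def _get_features_original(self) -> pd.Series:
#             ...  # map each output column back to its originating input column
#
# fit/transform then hand plain numpy arrays to the native estimator while callers
# keep working with labelled data frames.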
class ColumnSubsetTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that do not change column names,
but that may remove one or more columns.
Implementations must define :meth:`_get_features_out`.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# return column labels for arrays returned by the fitted transformer.
pass
def _get_features_original(self) -> pd.Series:
# return the series with output columns in index and output columns as values
features_out = self._get_features_out()
return pd.Series(index=features_out, data=features_out.values)
class ColumnPreservingTransformerWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
):
"""
DF wrapper for transformers whose output columns match the input columns.
The native transformer must not add, remove, reorder, or rename any of the input
columns.
"""
def _get_features_out(self) -> pd.Index:
return self.feature_names_in_
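# In other words, wrapping e.g. a scaler with this class yields a data frame with
# exactly the input column names, in the original order; only the cell values change.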
class BaseMultipleInputsPerOutputTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer]
):
"""
DF wrapper for transformers mapping multiple input columns to individual output
columns.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# make this method abstract to ensure subclasses override the default
# behaviour, which usually relies on method ``_get_features_original``
pass
def _get_features_original(self) -> pd.Series:
raise NotImplementedError(
f"{type(self.native_estimator).__name__} transformers map multiple "
"inputs to individual output columns; current sklearndf implementation "
"only supports many-to-1 mappings from output columns to input columns"
)
class BaseDimensionalityReductionWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers.
The native transformer is considered to map all input columns to each output column.
"""
@property
@abstractmethod
def _n_components_(self) -> int:
pass
def _get_features_out(self) -> pd.Index:
return pd.Index([f"x_{i}" for i in range(self._n_components_)])
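# For example, a reducer fitted with three components labels the transformed columns
# "x_0", "x_1", "x_2", irrespective of the original feature names (this merely
# illustrates the naming scheme implemented above).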
class NComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
:attr:`n_components` attribute.
Subclasses must implement :meth:`_get_features_original`.
"""
_ATTR_N_COMPONENTS = "n_components"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_N_COMPONENTS)
@property
def _n_components_(self) -> int:
return getattr(self.native_estimator, self._ATTR_N_COMPONENTS)
class ComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
``components_`` attribute.
The native transformer must provide a ``components_`` attribute once fitted,
as an array of shape (n_components, n_features).
"""
_ATTR_COMPONENTS = "components_"
# noinspection PyPep8Naming
def _post_fit(
self, X: pd.DataFrame, y: Optional[pd.Series] = None, **fit_params
) -> None:
# noinspection PyProtectedMember
super()._post_fit(X, y, **fit_params)
self._validate_delegate_attribute(attribute_name=self._ATTR_COMPONENTS)
@property
def _n_components_(self) -> int:
return len(getattr(self.native_estimator, self._ATTR_COMPONENTS))
class FeatureSelectionWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
DF wrapper for feature selection transformers.
The native transformer must implement a ``get_support`` method, providing the
    indices of the selected input columns.
"""
_ATTR_GET_SUPPORT = "get_support"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_GET_SUPPORT)
def _get_features_out(self) -> pd.Index:
get_support = getattr(self.native_estimator, self._ATTR_GET_SUPPORT)
return self.feature_names_in_[get_support()]
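# Sketch of the resulting behaviour (assumed feature names, for illustration only):
# if the wrapped selector's get_support() yields the mask [True, False, True] for
# input columns ["a", "b", "c"], the transformed frame keeps columns ["a", "c"].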
class ColumnTransformerWrapperDF(
TransformerWrapperDF[ColumnTransformer], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.compose.ColumnTransformer`.
Requires all transformers passed as the ``transformers`` parameter to implement
:class:`.TransformerDF`.
"""
__DROP = "drop"
__PASSTHROUGH = "passthrough"
__SPECIAL_TRANSFORMERS = (__DROP, __PASSTHROUGH)
def _validate_delegate_estimator(self) -> None:
column_transformer: ColumnTransformer = self.native_estimator
if (
column_transformer.remainder
not in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
):
raise ValueError(
f"unsupported value for arg remainder: ({column_transformer.remainder})"
)
non_compliant_transformers: List[str] = [
type(transformer).__name__
for _, transformer, _ in column_transformer.transformers
if not (
isinstance(transformer, TransformerDF)
or transformer in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
)
]
if non_compliant_transformers:
from .. import ColumnTransformerDF
raise ValueError(
f"{ColumnTransformerDF.__name__} only accepts instances of "
f"{TransformerDF.__name__} or special values "
f'"{" and ".join(ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS)}" '
"as valid transformers, but "
f'also got: {", ".join(non_compliant_transformers)}'
)
def _get_features_original(self) -> pd.Series:
"""
Return the series mapping output column names to original columns names.
:return: the series with index the column names of the output dataframe and
values the corresponding input column names.
"""
def _features_original(df_transformer: TransformerDF, columns: List[Any]):
if df_transformer == ColumnTransformerWrapperDF.__PASSTHROUGH:
# we may get positional indices for columns selected by the
                # 'passthrough' transformer, and in that case we need to look up the
# associated column names
if all(isinstance(column, int) for column in columns):
column_names = self._get_features_in()[columns]
else:
column_names = columns
return | pd.Series(index=column_names, data=column_names) | pandas.Series |
"""Plotting functions for AnnData.
"""
import os
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from matplotlib import pyplot as pl
from matplotlib import rcParams
from matplotlib.colors import is_color_like
import seaborn as sns
from .. import settings
from .. import logging as logg
from . import utils
from .utils import scatter_base, scatter_group, setup_axes
from ..utils import sanitize_anndata, doc_params
from .docs import doc_scatter_bulk, doc_show_save_ax
VALID_LEGENDLOCS = {
'none', 'right margin', 'on data', 'on data export', 'best', 'upper right', 'upper left',
'lower left', 'lower right', 'right', 'center left', 'center right',
'lower center', 'upper center', 'center'
}
@doc_params(scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def scatter(
adata,
x=None,
y=None,
color=None,
use_raw=None,
layers='X',
sort_order=True,
alpha=None,
basis=None,
groups=None,
components=None,
projection='2d',
legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
color_map=None,
palette=None,
frameon=None,
right_margin=None,
left_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None):
"""\
Scatter plot along observations or variables axes.
Color the plot using annotations of observations (`.obs`), variables
(`.var`) or expression of genes (`.var_names`).
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
x : `str` or `None`
x coordinate.
y : `str` or `None`
y coordinate.
color : string or list of strings, optional (default: `None`)
Keys for annotations of observations/cells or variables/genes, e.g.,
`'ann1'` or `['ann1', 'ann2']`.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
layers : `str` or tuple of strings, optional (default: `X`)
Use the `layers` attribute of `adata` if present: specify the layer for
`x`, `y` and `color`. If `layers` is a string, then it is expanded to
`(layers, layers, layers)`.
basis : {{'pca', 'tsne', 'umap', 'diffmap', 'draw_graph_fr', etc.}}
String that denotes a plotting tool that computed coordinates.
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a `matplotlib.Axis` or a list of it.
"""
if basis is not None:
axs = _scatter_obs(
adata=adata,
x=x,
y=y,
color=color,
use_raw=use_raw,
layers=layers,
sort_order=sort_order,
alpha=alpha,
basis=basis,
groups=groups,
components=components,
projection=projection,
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
frameon=frameon,
right_margin=right_margin,
left_margin=left_margin,
size=size,
title=title,
show=show,
save=save,
ax=ax)
elif x is not None and y is not None:
if ((x in adata.obs.keys() or x in adata.var.index)
and (y in adata.obs.keys() or y in adata.var.index)
and (color is None or color in adata.obs.keys() or color in adata.var.index)):
axs = _scatter_obs(
adata=adata,
x=x,
y=y,
color=color,
use_raw=use_raw,
layers=layers,
sort_order=sort_order,
alpha=alpha,
basis=basis,
groups=groups,
components=components,
projection=projection,
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
frameon=frameon,
right_margin=right_margin,
left_margin=left_margin,
size=size,
title=title,
show=show,
save=save,
ax=ax)
elif ((x in adata.var.keys() or x in adata.obs.index)
and (y in adata.var.keys() or y in adata.obs.index)
and (color is None or color in adata.var.keys() or color in adata.obs.index)):
axs = _scatter_var(
adata=adata,
x=x,
y=y,
color=color,
use_raw=use_raw,
layers=layers,
sort_order=sort_order,
alpha=alpha,
basis=basis,
groups=groups,
components=components,
projection=projection,
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
frameon=frameon,
right_margin=right_margin,
left_margin=left_margin,
size=size,
title=title,
show=show,
save=save,
ax=ax)
else:
raise ValueError(
'`x`, `y`, and potential `color` inputs must all come from either `.obs` or `.var`')
else:
raise ValueError('Either provide a `basis` or `x` and `y`.')
return axs
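# Minimal usage sketch (not part of the original docs; assumes the krumsiek11 toy data
# set with its 'cell_type' annotation, as used in the clustermap example further below):
#
#     import scanpy.api as sc
#     adata = sc.datasets.krumsiek11()
#     sc.pl.scatter(adata, x='Gata1', y='Gata2', color='cell_type')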
def _scatter_var(
adata,
x=None,
y=None,
color=None,
use_raw=None,
layers='X',
sort_order=True,
alpha=None,
basis=None,
groups=None,
components=None,
projection='2d',
legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
color_map=None,
palette=None,
frameon=None,
right_margin=None,
left_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None):
adata_T = adata.T
axs = _scatter_obs(
adata=adata_T,
x=x,
y=y,
color=color,
use_raw=use_raw,
layers=layers,
sort_order=sort_order,
alpha=alpha,
basis=basis,
groups=groups,
components=components,
projection=projection,
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
frameon=frameon,
right_margin=right_margin,
left_margin=left_margin,
size=size,
title=title,
show=show,
save=save,
ax=ax)
# store .uns annotations that were added to the new adata object
adata.uns = adata_T.uns
return axs
def _scatter_obs(
adata,
x=None,
y=None,
color=None,
use_raw=None,
layers='X',
sort_order=True,
alpha=None,
basis=None,
groups=None,
components=None,
projection='2d',
legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
color_map=None,
palette=None,
frameon=None,
right_margin=None,
left_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None):
"""See docstring of scatter."""
sanitize_anndata(adata)
from scipy.sparse import issparse
if use_raw is None and adata.raw is not None: use_raw = True
# process layers
if layers is None:
layers = 'X'
if isinstance(layers, str) and (layers == 'X' or layers in adata.layers.keys()):
layers = (layers, layers, layers)
elif isinstance(layers, (tuple, list)) and len(layers) == 3:
for layer in layers:
if layer not in adata.layers.keys() and layer != 'X':
raise ValueError(
'`layers` should have elements that are either \'X\' or in adata.layers.keys().')
else:
raise ValueError('`layers` should be a string or a list/tuple of length 3.')
    if use_raw and tuple(layers) != ('X', 'X', 'X'):
        raise ValueError('`use_raw` must be `False` if layers other than \'X\' are used.')
if legend_loc not in VALID_LEGENDLOCS:
raise ValueError(
'Invalid `legend_loc`, need to be one of: {}.'.format(VALID_LEGENDLOCS))
if components is None: components = '1,2' if '2d' in projection else '1,2,3'
if isinstance(components, str): components = components.split(',')
components = np.array(components).astype(int) - 1
keys = ['grey'] if color is None else [color] if isinstance(color, str) else color
if title is not None and isinstance(title, str):
title = [title]
highlights = adata.uns['highlights'] if 'highlights' in adata.uns else []
if basis is not None:
try:
# ignore the '0th' diffusion component
if basis == 'diffmap': components += 1
Y = adata.obsm['X_' + basis][:, components]
# correct the component vector for use in labeling etc.
if basis == 'diffmap': components -= 1
except KeyError:
raise KeyError('compute coordinates using visualization tool {} first'
.format(basis))
elif x is not None and y is not None:
x_arr = adata._get_obs_array(x, use_raw=use_raw, layer=layers[0])
y_arr = adata._get_obs_array(y, use_raw=use_raw, layer=layers[1])
x_arr = x_arr.toarray().flatten() if issparse(x_arr) else x_arr
y_arr = y_arr.toarray().flatten() if issparse(y_arr) else y_arr
Y = np.c_[x_arr[:, None], y_arr[:, None]]
else:
raise ValueError('Either provide a `basis` or `x` and `y`.')
if size is None:
n = Y.shape[0]
size = 120000 / n
if legend_loc.startswith('on data') and legend_fontsize is None:
legend_fontsize = rcParams['legend.fontsize']
elif legend_fontsize is None:
legend_fontsize = rcParams['legend.fontsize']
palette_was_none = False
if palette is None: palette_was_none = True
if isinstance(palette, list):
if not is_color_like(palette[0]):
palettes = palette
else:
palettes = [palette]
else:
palettes = [palette for i in range(len(keys))]
for i, palette in enumerate(palettes):
palettes[i] = utils.default_palette(palette)
if basis is not None:
component_name = (
'DC' if basis == 'diffmap'
else 'tSNE' if basis == 'tsne'
else 'UMAP' if basis == 'umap'
else 'PC' if basis == 'pca'
else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
else basis)
else:
component_name = None
axis_labels = (x, y) if component_name is None else None
show_ticks = True if component_name is None else False
# generate the colors
color_ids = []
categoricals = []
colorbars = []
for ikey, key in enumerate(keys):
c = 'white'
categorical = False # by default, assume continuous or flat color
colorbar = None
        # test whether we have categorical or continuous annotation
if key in adata.obs_keys():
if is_categorical_dtype(adata.obs[key]):
categorical = True
else:
c = adata.obs[key]
# coloring according to gene expression
elif (use_raw
and adata.raw is not None
and key in adata.raw.var_names):
c = adata.raw[:, key].X
elif key in adata.var_names:
c = adata[:, key].X if layers[2] == 'X' else adata[:, key].layers[layers[2]]
c = c.toarray().flatten() if issparse(c) else c
elif is_color_like(key): # a flat color
c = key
colorbar = False
else:
raise ValueError(
'key \'{}\' is invalid! pass valid observation annotation, '
'one of {} or a gene name {}'
.format(key, adata.obs_keys(), adata.var_names))
if colorbar is None:
colorbar = not categorical
colorbars.append(colorbar)
if categorical: categoricals.append(ikey)
color_ids.append(c)
if right_margin is None and len(categoricals) > 0:
if legend_loc == 'right margin': right_margin = 0.5
if title is None and keys[0] is not None:
title = [key.replace('_', ' ') if not is_color_like(key) else '' for key in keys]
axs = scatter_base(Y,
title=title,
alpha=alpha,
component_name=component_name,
axis_labels=axis_labels,
component_indexnames=components + 1,
projection=projection,
colors=color_ids,
highlights=highlights,
colorbars=colorbars,
right_margin=right_margin,
left_margin=left_margin,
sizes=[size for c in keys],
color_map=color_map,
show_ticks=show_ticks,
ax=ax)
def add_centroid(centroids, name, Y, mask):
Y_mask = Y[mask]
if Y_mask.shape[0] == 0: return
median = np.median(Y_mask, axis=0)
i = np.argmin(np.sum(np.abs(Y_mask - median), axis=1))
centroids[name] = Y_mask[i]
# loop over all categorical annotation and plot it
for i, ikey in enumerate(categoricals):
palette = palettes[i]
key = keys[ikey]
utils.add_colors_for_categorical_sample_annotation(
adata, key, palette, force_update_colors=not palette_was_none)
# actually plot the groups
mask_remaining = np.ones(Y.shape[0], dtype=bool)
centroids = {}
if groups is None:
for iname, name in enumerate(adata.obs[key].cat.categories):
if name not in settings.categories_to_ignore:
mask = scatter_group(axs[ikey], key, iname,
adata, Y, projection, size=size, alpha=alpha)
mask_remaining[mask] = False
if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask)
else:
groups = [groups] if isinstance(groups, str) else groups
for name in groups:
if name not in set(adata.obs[key].cat.categories):
raise ValueError('"' + name + '" is invalid!'
+ ' specify valid name, one of '
+ str(adata.obs[key].cat.categories))
else:
iname = np.flatnonzero(adata.obs[key].cat.categories.values == name)[0]
mask = scatter_group(axs[ikey], key, iname,
adata, Y, projection, size=size, alpha=alpha)
if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask)
mask_remaining[mask] = False
if mask_remaining.sum() > 0:
data = [Y[mask_remaining, 0], Y[mask_remaining, 1]]
if projection == '3d': data.append(Y[mask_remaining, 2])
axs[ikey].scatter(*data, marker='.', c='lightgrey', s=size,
edgecolors='none', zorder=-1)
legend = None
if legend_loc.startswith('on data'):
if legend_fontweight is None:
legend_fontweight = 'bold'
for name, pos in centroids.items():
axs[ikey].text(pos[0], pos[1], name,
weight=legend_fontweight,
verticalalignment='center',
horizontalalignment='center',
fontsize=legend_fontsize)
all_pos = np.zeros((len(adata.obs[key].cat.categories), 2))
for iname, name in enumerate(adata.obs[key].cat.categories):
if name in centroids:
all_pos[iname] = centroids[name]
else:
all_pos[iname] = [np.nan, np.nan]
utils._tmp_cluster_pos = all_pos
if legend_loc == 'on data export':
filename = settings.writedir + 'pos.csv'
logg.msg('exporting label positions to {}'.format(filename), v=1)
if settings.writedir != '' and not os.path.exists(settings.writedir):
os.makedirs(settings.writedir)
np.savetxt(filename, all_pos, delimiter=',')
elif legend_loc == 'right margin':
legend = axs[ikey].legend(
frameon=False, loc='center left',
bbox_to_anchor=(1, 0.5),
ncol=(1 if len(adata.obs[key].cat.categories) <= 14
else 2 if len(adata.obs[key].cat.categories) <= 30 else 3),
fontsize=legend_fontsize)
elif legend_loc != 'none':
legend = axs[ikey].legend(
frameon=False, loc=legend_loc, fontsize=legend_fontsize)
if legend is not None:
for handle in legend.legendHandles: handle.set_sizes([300.0])
# draw a frame around the scatter
frameon = settings._frameon if frameon is None else frameon
if not frameon:
for ax in axs:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_frame_on(False)
utils.savefig_or_show('scatter' if basis is None else basis, show=show, save=save)
if show == False: return axs if len(keys) > 1 else axs[0]
def ranking(adata, attr, keys, dictionary=None, indices=None,
labels=None, color='black', n_points=30,
log=False, show=None):
"""Plot rankings.
See, for example, how this is used in pl.pca_ranking.
Parameters
----------
adata : AnnData
The data.
attr : {'var', 'obs', 'uns', 'varm', 'obsm'}
The attribute of AnnData that contains the score.
keys : str or list of str
The scores to look up an array from the attribute of adata.
Returns
-------
Returns matplotlib gridspec with access to the axes.
"""
if isinstance(keys, str) and indices is not None:
scores = getattr(adata, attr)[keys][:, indices]
keys = ['{}{}'.format(keys[:-1], i+1) for i in indices]
else:
if dictionary is None:
scores = getattr(adata, attr)[keys]
else:
scores = getattr(adata, attr)[dictionary][keys]
n_panels = len(keys) if isinstance(keys, list) else 1
if n_panels == 1: scores, keys = scores[:, None], [keys]
if log: scores = np.log(scores)
if labels is None:
labels = adata.var_names if attr in {'var', 'varm'} else np.arange(scores.shape[0]).astype(str)
if isinstance(labels, str):
labels = [labels + str(i+1) for i in range(scores.shape[0])]
from matplotlib import gridspec
if n_panels <= 5: n_rows, n_cols = 1, n_panels
else: n_rows, n_cols = 2, int(n_panels/2 + 0.5)
fig = pl.figure(figsize=(n_cols * rcParams['figure.figsize'][0],
n_rows * rcParams['figure.figsize'][1]))
left, bottom = 0.2/n_cols, 0.13/n_rows
gs = gridspec.GridSpec(nrows=n_rows, ncols=n_cols, wspace=0.2,
left=left, bottom=bottom,
right=1-(n_cols-1)*left-0.01/n_cols,
top=1-(n_rows-1)*bottom-0.1/n_rows)
for iscore, score in enumerate(scores.T):
pl.subplot(gs[iscore])
indices = np.argsort(score)[::-1][:n_points+1]
for ig, g in enumerate(indices):
pl.text(ig, score[g], labels[g], color=color,
rotation='vertical', verticalalignment='bottom',
horizontalalignment='center', fontsize=8)
pl.title(keys[iscore].replace('_', ' '))
        if n_panels <= 5 or iscore >= n_cols: pl.xlabel('ranking')
pl.xlim(-0.9, ig + 0.9)
score_min, score_max = np.min(score[indices]), np.max(score[indices])
pl.ylim((0.95 if score_min > 0 else 1.05) * score_min,
(1.05 if score_max > 0 else 0.95) * score_max)
if show == False: return gs
@doc_params(show_save_ax=doc_show_save_ax)
def violin(adata, keys, groupby=None, log=False, use_raw=None, stripplot=True, jitter=True,
size=1, scale='width', order=None, multi_panel=None, show=None,
xlabel='', rotation=None, save=None, ax=None, **kwds):
"""\
Violin plot.
Wraps `seaborn.violinplot` for :class:`~anndata.AnnData`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
keys : `str` or list of `str`
Keys for accessing variables of `.var_names` or fields of `.obs`.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider.
log : `bool`, optional (default: `False`)
Plot on logarithmic axis.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
multi_panel : `bool`, optional (default: `False`)
Display keys in multiple panels also when `groupby is not None`.
stripplot : `bool` optional (default: `True`)
Add a stripplot on top of the violin plot.
See `seaborn.stripplot`.
jitter : `float` or `bool`, optional (default: `True`)
Add jitter to the stripplot (only when stripplot is True)
See `seaborn.stripplot`.
size : int, optional (default: 1)
Size of the jitter points.
    order : list of str, optional (default: `None`)
Order in which to show the categories.
scale : {{'area', 'count', 'width'}}, optional (default: 'width')
The method used to scale the width of each violin. If 'area', each
violin will have the same area. If 'count', the width of the violins
will be scaled by the number of observations in that bin. If 'width',
each violin will have the same width.
xlabel : `str`, optional (default: `''`)
Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
otherwise, no label is shown.
rotation : `float`, optional (default: `None`)
Rotation of xtick labels.
{show_save_ax}
**kwds : keyword arguments
Are passed to `seaborn.violinplot`.
Returns
-------
A `matplotlib.Axes` object if `ax` is `None` else `None`.
"""
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(keys, str): keys = [keys]
obs_keys = False
for key in keys:
if key in adata.obs_keys(): obs_keys = True
if obs_keys and key not in set(adata.obs_keys()):
raise ValueError(
'Either use observation keys or variable names, but do not mix. '
'Did not find {} in adata.obs_keys().'.format(key))
if obs_keys:
obs_df = adata.obs
else:
if groupby is None: obs_df = pd.DataFrame()
else: obs_df = pd.DataFrame(adata.obs[groupby])
for key in keys:
if adata.raw is not None and use_raw:
X_col = adata.raw[:, key].X
else:
X_col = adata[:, key].X
obs_df[key] = X_col
if groupby is None:
obs_tidy = pd.melt(obs_df, value_vars=keys)
x = 'variable'
ys = ['value']
else:
obs_tidy = obs_df
x = groupby
ys = keys
if multi_panel:
if groupby is None and len(ys) == 1:
# This is a quick and dirty way for adapting scales across several
# keys if groupby is None.
y = ys[0]
g = sns.FacetGrid(obs_tidy, col=x, col_order=keys, sharey=False)
# don't really know why this gives a warning without passing `order`
g = g.map(sns.violinplot, y, inner=None, orient='vertical',
scale=scale, order=keys, **kwds)
if stripplot:
g = g.map(sns.stripplot, y, orient='vertical', jitter=jitter, size=size, order=keys,
color='black')
if log:
g.set(yscale='log')
g.set_titles(col_template='{col_name}').set_xlabels('')
if rotation is not None:
for ax in g.axes[0]:
ax.tick_params(labelrotation=rotation)
else:
if ax is None:
axs, _, _, _ = setup_axes(
ax=ax, panels=['x'] if groupby is None else keys, show_ticks=True, right_margin=0.3)
else:
axs = [ax]
for ax, y in zip(axs, ys):
ax = sns.violinplot(x, y=y, data=obs_tidy, inner=None, order=order,
orient='vertical', scale=scale, ax=ax, **kwds)
if stripplot:
ax = sns.stripplot(x, y=y, data=obs_tidy, order=order,
jitter=jitter, color='black', size=size, ax=ax)
if xlabel == '' and groupby is not None and rotation is None:
xlabel = groupby.replace('_', ' ')
ax.set_xlabel(xlabel)
if log:
ax.set_yscale('log')
if rotation is not None:
ax.tick_params(labelrotation=rotation)
utils.savefig_or_show('violin', show=show, save=save)
if show is False:
if multi_panel:
return g
elif len(axs) == 1:
return axs[0]
else:
return axs
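# Usage sketch (not from the original docs; same krumsiek11 assumptions as above):
#
#     sc.pl.violin(adata, keys=['Gata1', 'Gata2'], groupby='cell_type', rotation=90)
#
# draws one panel per gene, with one violin per cell type in each panel.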
@doc_params(show_save_ax=doc_show_save_ax)
def clustermap(
adata, obs_keys=None, use_raw=None, show=None, save=None, **kwds):
"""\
Hierarchically-clustered heatmap.
Wraps `seaborn.clustermap
<https://seaborn.pydata.org/generated/seaborn.clustermap.html>`__ for
:class:`~anndata.AnnData`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
obs_keys : `str`
Categorical annotation to plot with a different color map.
Currently, only a single key is supported.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
{show_save_ax}
**kwds : keyword arguments
Keyword arguments passed to `seaborn.clustermap
<https://seaborn.pydata.org/generated/seaborn.clustermap.html>`__.
Returns
-------
If `show == False`, a `seaborn.ClusterGrid` object.
Notes
-----
The returned object has a savefig() method that should be used if you want
to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
clustergrid.dendrogram_row.reordered_ind
Column indices, use: clustergrid.dendrogram_col.reordered_ind
Examples
--------
    Soon to come with figures. In the meantime, see
https://seaborn.pydata.org/generated/seaborn.clustermap.html.
>>> import scanpy.api as sc
>>> adata = sc.datasets.krumsiek11()
>>> sc.pl.clustermap(adata, obs_keys='cell_type')
"""
if not isinstance(obs_keys, (str, type(None))):
raise ValueError('Currently, only a single key is supported.')
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
X = adata.raw.X if use_raw else adata.X
df = | pd.DataFrame(X, index=adata.obs_names, columns=adata.var_names) | pandas.DataFrame |
import json
import networkx as nx
import numpy as np
import os
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from config import logger, config
def read_profile_data():
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_df = pd.read_csv(config.profile_file)
profile_na.columns = profile_df.columns
profile_df = profile_df.append(profile_na)
return profile_df
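# The appended row acts as a sentinel profile: pid == -1 with all other attributes 0,
# so that queries without a user profile (pid filled with -1 in gen_profile_feas below)
# still find a matching row when merging on pid.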
def merge_raw_data():
tr_queries = pd.read_csv(config.train_query_file, parse_dates=['req_time'])
te_queries = pd.read_csv(config.test_query_file, parse_dates=['req_time'])
tr_plans = pd.read_csv(config.train_plan_file, parse_dates=['plan_time'])
te_plans = pd.read_csv(config.test_plan_file, parse_dates=['plan_time'])
tr_click = pd.read_csv(config.train_click_file)
trn = tr_queries.merge(tr_click, on='sid', how='left')
trn = trn.merge(tr_plans, on='sid', how='left')
trn = trn.drop(['click_time'], axis=1)
trn['click_mode'] = trn['click_mode'].fillna(0)
tst = te_queries.merge(te_plans, on='sid', how='left')
tst['click_mode'] = -1
df = pd.concat([trn, tst], axis=0, sort=False)
df = df.drop(['plan_time'], axis=1)
df = df.reset_index(drop=True)
df['weekday'] = df['req_time'].dt.weekday
df['day'] = df['req_time'].dt.day
df['hour'] = df['req_time'].dt.hour
df = df.drop(['req_time'], axis=1)
logger.info('total data size: {}'.format(df.shape))
logger.info('data columns: {}'.format(', '.join(df.columns)))
return df
def extract_plans(df):
plans = []
for sid, plan in tqdm(zip(df['sid'].values, df['plans'].values)):
try:
p = json.loads(plan)
for x in p:
x['sid'] = sid
plans.extend(p)
except:
pass
return pd.DataFrame(plans)
def generate_od_features(df):
feat = df[['o','d']].drop_duplicates()
feat = feat.merge(df.groupby('o')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='o')
feat.rename(columns={'day': 'o_nunique_day',
'hour': 'o_nunique_hour',
'pid': 'o_nunique_pid',
'click_mode': 'o_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby('d')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='d')
feat.rename(columns={'day': 'd_nunique_day',
'hour': 'd_nunique_hour',
'pid': 'd_nunique_pid',
'click_mode': 'd_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby(['o', 'd'])[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on=['o', 'd'])
feat.rename(columns={'day': 'od_nunique_day',
'hour': 'od_nunique_hour',
'pid': 'od_nunique_pid',
'click_mode': 'od_nunique_click'}, inplace=True)
return feat
def generate_pid_features(df):
feat = df.groupby('pid')[['hour', 'day']].nunique().reset_index()
feat.rename(columns={'hour': 'pid_nunique_hour', 'day': 'pid_nunique_day'}, inplace=True)
feat['nunique_hour_d_nunique_day'] = feat['pid_nunique_hour'] / feat['pid_nunique_day']
feat = feat.merge(df.groupby('pid')[['o', 'd']].nunique().reset_index(), how='left', on='pid')
feat.rename(columns={'o': 'pid_nunique_o', 'd': 'pid_nunique_d'}, inplace=True)
feat['nunique_o_d_nunique_d'] = feat['pid_nunique_o'] / feat['pid_nunique_d']
return feat
def generate_od_cluster_features(df):
G = nx.Graph()
G.add_nodes_from(df['o'].unique().tolist())
G.add_nodes_from(df['d'].unique().tolist())
edges = df[['o','d']].apply(lambda x: (x[0],x[1]), axis=1).tolist()
G.add_edges_from(edges)
cluster = nx.clustering(G)
cluster_df = pd.DataFrame([{'od': key, 'cluster': cluster[key]} for key in cluster.keys()])
return cluster_df
def gen_od_feas(data):
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
data = data.drop(['o', 'd'], axis=1)
return data
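# For instance, an origin string o='116.32,39.98' (presumably longitude,latitude) is
# split into o1=116.32 and o2=39.98; the raw 'o'/'d' string columns are then dropped.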
def gen_plan_feas(data):
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
logger.info('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
if row['hour'] > 7 and row['hour'] < 18: # 7:00 - 18:00
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
else: # 6:00 - 7:00
h = 2
return str(w) + '_' + str(h)
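# Worked examples of the bucketing above (illustration only):
#   weekday=2, hour=8  -> '2_8'  (working day, daytime hours kept as-is)
#   weekday=6, hour=23 -> '0_0'  (weekend mapped to 0, late night mapped to 0)
#   weekday=4, hour=19 -> '4_1'  (evening 18:00-21:00 mapped to 1)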
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
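# Note: 'fly-dist' is the straight-line (Euclidean) distance in raw coordinate units,
# so the derived ratios compare each plan's routed distance/ETA/price against the
# as-the-crow-flies gap between origin and destination.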
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv(config.pid_feature_file)
data = data.merge(feat, how='left', on='pid')
return data
def gen_od_feat(data):
feat = pd.read_csv(config.od_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
logger.info('sid shape={}'.format(sid.shape))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
logger.info('feature shape={}'.format(feat.shape))
logger.info('feature columns={}'.format(feat.columns))
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv(config.od_cluster_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
data['o1-m-o2'] = np.abs(data['o1'] - data['o2'])
data['d1-m-d2'] = np.abs(data['d1'] - data['d2'])
data['od_area'] = data['o1-m-o2']*data['d1-m-d2']
data['od_ratio'] = data['o1-m-o2']/data['d1-m-d2']
return data
def gen_od_mode_cnt_feat(data):
feat = pd.read_csv(config.od_mode_cnt_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weekday_hour_cnt_feat(data):
feat = pd.read_csv(config.weekday_hour_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','req_time'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','req_time'])
sid = pd.concat((tr_sid, te_sid))
sid['req_time'] = pd.to_datetime(sid['req_time'])
sid['hour'] = sid['req_time'].map(lambda x: x.hour)
sid['weekday'] = sid['req_time'].map(lambda x: x.weekday())
feat = sid.merge(feat, how='left', on=['hour','weekday']).drop(['hour','weekday','req_time'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_plan_agg_feat(data):
#feat = pd.read_csv(config.od_plan_agg_feature_file)
#tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d','req_time'])
#te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d', 'req_time'])
#sid = pd.concat((tr_sid, te_sid))
#sid['req_time'] = pd.to_datetime(sid['req_time'])
#sid['hour'] = sid['req_time'].map(lambda x: x.hour)
#feat = sid.merge(feat, how='left', on=['o','d','hour']).drop(['o','d','hour','req_time'], axis=1)
feat = pd.read_csv(config.od_plan_agg_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_feat(data):
feat = pd.read_csv(config.mode_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_stats_feat(data):
feat = pd.read_csv(config.od_stats_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_daily_plan_feat(data):
feat = pd.read_csv(config.daily_plan_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weather_feat(data):
feat = pd.read_csv(config.weather_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_pid_count_feat(data):
feat = pd.read_csv(config.od_pid_count_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_plan_ratio_feat(data):
feat = pd.read_csv(config.plan_ratio_file)
data = data.merge(feat, how='left', on='sid')
return data
def generate_f1(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f1')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
df = gen_od_feas(df)
df = gen_plan_feas(df)
df = gen_profile_feas(df)
df = gen_ratio_feas(df)
df = gen_fly_dist_feas(df)
df = gen_aggregate_profile_feas(df) # 0.6759966661470926
df = gen_pid_feat(df) # 0.6762996872664375
df = gen_od_feat(df) # without click count: 0.6780576865566392; with click count: 0.6795810670221226
df = gen_od_cluster_feat(df) # 0.6796523605372234
df = gen_od_eq_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def generate_f2(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f2')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
trn, tst = generate_f1(df)
df = pd.concat((trn, tst))
df = gen_od_mode_cnt_feat(df) # [+] fold #0: 0.6835031183515229
df = gen_weekday_hour_cnt_feat(df)
df = gen_od_plan_agg_feat(df)
df = gen_mode_feat(df)
#df = gen_mode_stats_feat(df)
## df = gen_weather_feat(df)
#df = gen_daily_plan_feat(df)
#df = gen_od_pid_count_feat(df)
## df = gen_plan_ratio_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def generate_f3(df):
    trn_feat_name, tst_feat_name = config.get_feature_name('f3')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
trn, tst = generate_f2(df)
df = pd.concat((trn, tst))
#df = gen_mode_stats_feat(df)
## df = gen_weather_feat(df)
#df = gen_daily_plan_feat(df)
#df = gen_od_pid_count_feat(df)
## df = gen_plan_ratio_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def get_train_test_features():
config.set_feature_name('f1')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f1.')
trn, tst = generate_f1(df)
logger.info('saving the training and test f1 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
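# Typical call site (sketch): trn and tst are the f1 feature matrices with identifier
# columns dropped, y holds the click_mode labels, and sub keeps the sid column for
# building a submission, e.g.
#
#     trn, y, tst, sub = get_train_test_features()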
def get_train_test_features2():
config.set_feature_name('f2')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f2.')
trn, tst = generate_f2(df)
logger.info('saving the training and test f2 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features2a():
config.set_feature_name('f2')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f2.')
trn, tst = generate_f2(df)
logger.info('saving the training and test f2 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/build/feature/od_coord_feature.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_time.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_min.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features3():
config.set_feature_name('f3')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f3.')
trn, tst = generate_f3(df)
logger.info('saving the training and test f3 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features4():
config.set_feature_name('f4')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features0():
config.set_feature_name('f0')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
feat = | pd.read_csv('/home/ubuntu/projects/kddcup2019track1/build/feature/od_coord_feature.csv') | pandas.read_csv |
# %% imports
import numpy as np
import pandas as pd
import config as cfg
from src.utils.data_processing import hours_in_year, medea_path
# --------------------------------------------------------------------------- #
# %% settings and initializing
# --------------------------------------------------------------------------- #
STATIC_FNAME = medea_path('data', 'processed', 'data_static.xlsx')
idx = pd.IndexSlice
# --------------------------------------------------------------------------- #
# %% read in data
# --------------------------------------------------------------------------- #
static_data = {
'CAP_R': pd.read_excel(STATIC_FNAME, 'INITIAL_CAP_R', header=[0], index_col=[0, 1]),
'CAPCOST_R': pd.read_excel(STATIC_FNAME, 'CAPITALCOST_R', header=[0], index_col=[0, 1]),
'potentials': pd.read_excel(STATIC_FNAME, 'potentials', header=[0], index_col=[0]),
'tec': pd.read_excel(STATIC_FNAME, 'parameters_G'),
'feasops': pd.read_excel(STATIC_FNAME, 'FEASIBLE_INPUT-OUTPUT'),
'cost_transport': pd.read_excel(STATIC_FNAME, 'COST_TRANSPORT', header=[0], index_col=[0]),
'CAPCOST_K': pd.read_excel(STATIC_FNAME, 'CAPITALCOST_S', header=[0], index_col=[0, 1]),
'CAP_X': pd.read_excel(STATIC_FNAME, 'ATC', index_col=[0]),
'DISTANCE': pd.read_excel(STATIC_FNAME, 'KM', index_col=[0]),
'AIR_POLLUTION': pd.read_excel(STATIC_FNAME, 'AIR_POLLUTION', index_col=[0])
}
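# Quick sanity check of the loaded workbook sheets (sketch; uncomment when debugging inputs):
# for name, df in static_data.items():
#     print(name, getattr(df, 'shape', None))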
# --------------------------------------------------------------------------------------------------------------------
plant_data = {
'hydro': pd.read_excel(medea_path('data', 'processed', 'plant-list_hydro.xlsx'), 'opsd_hydro'),
'conventional': pd.read_excel(medea_path('data', 'processed', 'power_plant_db.xlsx'))
}
ts_data = {
'timeseries': pd.read_csv(medea_path('data', 'processed', 'medea_regional_timeseries.csv'))
}
# --------------------------------------------------------------------------- #
# %% prepare set data
# --------------------------------------------------------------------------- #
dict_sets = {
'f': {
'Nuclear': [10],
'Lignite': [20],
'Coal': [30],
'Gas': [40],
'Oil': [50],
'Hydro': [60],
'Biomass': [70],
'Solar': [80],
'Wind': [90],
'Power': [100],
'Heat': [110],
'Syngas': [120]
},
'l': {f'l{x}': [True] for x in range(1, 5)},
'm': {
'el': True,
'ht': True
},
'n': {
'pv': [True],
'ror': [True],
'wind_on': [True],
'wind_off': [True]
},
'k': {
'psp_day': [True],
'psp_week': [True],
'psp_season': [True],
'res_day': [True],
'res_week': [True],
'res_season': [True],
'battery': [True]
},
't': {f't{hour}': [True] for hour in range(1, hours_in_year(cfg.year) + 1)},
'z': {zone: [True] for zone in cfg.zones}
}
# convert to DataFrames
for key, value in dict_sets.items():
dict_sets.update({key: pd.DataFrame.from_dict(dict_sets[key], orient='index', columns=['Value'])})
# --------------------------------------------------------------------------- #
# %% prepare static data
# --------------------------------------------------------------------------- #
# Source 'CO2_INTENSITY': CO2 Emission Factors for Fossil Fuels, UBA, 2016
dict_static = {
'CO2_INTENSITY': {
'Nuclear': [0],
'Lignite': [0.399],
'Coal': [0.337],
'Gas': [0.201],
'Oil': [0.266],
'Hydro': [0],
'Biomass': [0],
'Solar': [0],
'Wind': [0],
'Power': [0],
'Heat': [0],
'Syngas': [0]
},
'eta': {
'nuc': [0.34],
'lig_stm': [0.31], 'lig_stm_chp': [0.31],
'lig_boa': [0.43], 'lig_boa_chp': [0.43],
'coal_sub': [0.32], 'coal_sub_chp': [0.32],
'coal_sc': [0.41], 'coal_sc_chp': [0.41],
'coal_usc': [0.44], 'coal_usc_chp': [0.44],
'coal_igcc': [0.55],
'ng_stm': [0.40], 'ng_stm_chp': [0.40],
'ng_cbt_lo': [0.34], 'ng_cbt_lo_chp': [0.34],
'ng_cbt_hi': [0.40], 'ng_cbt_hi_chp': [0.40],
'ng_cc_lo': [0.38], 'ng_cc_lo_chp': [0.38],
'ng_cc_hi': [0.55], 'ng_cc_hi_chp': [0.55],
'ng_mtr': [0.40], 'ng_mtr_chp': [0.40],
'ng_boiler_chp': [0.90],
'oil_stm': [0.31], 'oil_stm_chp': [0.31],
'oil_cbt': [0.35], 'oil_cbt_chp': [0.35],
'oil_cc': [0.42], 'oil_cc_chp': [0.42],
'bio': [0.35], 'bio_chp': [0.35],
'heatpump_pth': [3.0]
},
'map_name2fuel': {
'nuc': 'Nuclear',
'lig': 'Lignite',
'coal': 'Coal',
'ng': 'Gas',
'oil': 'Oil',
'bio': 'Biomass',
'heatpump': 'Power'
},
'CAPCOST_X': {
'AT': [1250],
'DE': [1250]
},
'VALUE_NSE': {
'AT': [12500],
'DE': [12500]
},
'LAMBDA': [0.125],
'SIGMA': [0.175]
}
dict_additions = {
'boilers': {
# 'medea_type': [49.5],
'set_element': 'ng_boiler_chp',
('cap', 'AT'): [4.5],
('cap', 'DE'): [25.5],
('eta', 'AT'): [0.9],
('eta', 'DE'): [0.9]
# ('count', 'AT'): [15],
# ('count', 'DE'): [85],
# ('num', 'AT'): [85],
# ('num', 'DE'): [255]
},
'heatpumps': {
# 'medea_type': [100],
'set_element': 'heatpump_pth',
('cap', 'AT'): [0.1],
('cap', 'DE'): [0.1],
('eta', 'AT'): [3.0],
('eta', 'DE'): [3.0]
# ('count', 'AT'): [1],
# ('count', 'DE'): [1],
# ('num', 'AT'): [1],
# ('num', 'DE'): [1]
},
'batteries': {
'power_in': [0],
'power_out': [0],
'energy_max': [0],
'efficiency_in': [0.96],
'efficiency_out': [0.96],
'cost_power': [static_data['CAPCOST_K'].loc[('AT', 'battery'), 'annuity-power'].round(4)],
'cost_energy': [static_data['CAPCOST_K'].loc[('AT', 'battery'), 'annuity-energy'].round(4)],
'inflow_factor': [0]
}
}
dict_instantiate = {'CO2_INTENSITY': pd.DataFrame.from_dict(dict_static['CO2_INTENSITY'],
orient='index', columns=['Value'])}
dict_instantiate.update({'efficiency': pd.DataFrame.from_dict(dict_static['eta'], orient='index', columns=['l1'])})
dict_instantiate['efficiency']['product'] = 'el'
dict_instantiate['efficiency'].loc[dict_instantiate['efficiency'].index.str.contains('pth'), 'product'] = 'ht'
dict_instantiate['efficiency'].loc['ng_boiler_chp', 'product'] = 'ht'
dict_instantiate['efficiency']['fuel'] = dict_instantiate['efficiency'].index.to_series().str.split('_').str.get(
0).replace(dict_static['map_name2fuel'])
dict_instantiate['efficiency'].set_index(['product', 'fuel'], append=True, inplace=True)
dict_instantiate['efficiency'].index.set_names(['medea_type', 'product', 'fuel_name'], inplace=True)
for i in range(1, 6):
dict_instantiate['efficiency'][f'l{i}'] = dict_instantiate['efficiency']['l1']
dict_instantiate.update({'CAP_R': static_data['CAP_R'].loc[idx[:, cfg.year], :]})
dict_instantiate.update({'CAP_X': static_data['CAP_X'].loc[
static_data['CAP_X'].index.str.contains('|'.join(cfg.zones)),
static_data['CAP_X'].columns.str.contains('|'.join(cfg.zones))] / 1000})
dict_instantiate.update({'DISTANCE': static_data['DISTANCE'].loc[static_data['DISTANCE'].index.str.contains(
'|'.join(cfg.zones)), static_data['DISTANCE'].columns.str.contains('|'.join(cfg.zones))]})
static_data.update({'CAPCOST_X': pd.DataFrame.from_dict(dict_static['CAPCOST_X'], orient='index', columns=['Value'])})
static_data.update({'VALUE_NSE': pd.DataFrame.from_dict(dict_static['VALUE_NSE'], orient='index', columns=['Value'])})
static_data.update({'LAMBDA': pd.DataFrame(dict_static['LAMBDA'], columns=['Value'])})
static_data.update({'SIGMA': pd.DataFrame(dict_static['SIGMA'], columns=['Value'])})
# --------------------------------------------------------------------------- #
# %% preprocessing plant data
# --------------------------------------------------------------------------- #
# dispatchable (thermal) plants
# filter active thermal plants
plant_data.update({'active': plant_data['conventional'].loc[
(plant_data['conventional']['UnitOperOnlineDate'] < pd.Timestamp(cfg.year, 1, 1)) &
(plant_data['conventional']['UnitOperRetireDate'] > pd.Timestamp(cfg.year, 12, 31)) |
np.isnat(plant_data['conventional']['UnitOperRetireDate'])]})
# exclude hydro power plant
plant_data['active'] = plant_data['active'].loc[(plant_data['active']['MedeaType'] < 60) |
(plant_data['active']['MedeaType'] >= 70)]
# capacities by country in GW
prop_g = plant_data['active'].groupby(['MedeaType', 'PlantCountry'])['UnitNameplate'].sum().to_frame() / 1000
prop_g['eta'] = plant_data['active'].groupby(['MedeaType', 'PlantCountry'])['Eta'].mean().to_frame()
# prop_g['count'] = plant_data['active'].groupby(['MedeaType'])['PlantCountry'].value_counts().to_frame(name='count')
# prop_g['num'] = (prop_g['UnitNameplate'].round(decimals=1) * 10).astype(int)
prop_g.rename(index={'Germany': 'DE', 'Austria': 'AT'}, columns={'UnitNameplate': 'cap'}, inplace=True)
prop_g = prop_g.unstack(-1)
prop_g.drop(0.0, axis=0, inplace=True)
# index by plant element names instead of medea_type-numbers
prop_g.index = prop_g.index.map(pd.Series(static_data['tec']['set_element'].values,
index=static_data['tec']['medea_type'].values).to_dict())
# update 'empirical' efficiencies with generic efficiencies
for zone in cfg.zones:
prop_g.loc[:, idx['eta', zone]].update(pd.DataFrame.from_dict(dict_static['eta'],
orient='index', columns=['eta']).iloc[:, 0])
# add data for heat boilers
prop_g = prop_g.append(pd.DataFrame.from_dict(dict_additions['boilers']).set_index('set_element'))
# add data for heatpumps
prop_g = prop_g.append(pd.DataFrame.from_dict(dict_additions['heatpumps']).set_index('set_element'))
# remove non-existent plant
prop_g = prop_g.stack(-1).swaplevel(axis=0)
prop_g = prop_g.dropna()
# update instantiation dictionary
dict_instantiate.update({'tec_props': prop_g})
# add 'tec'-set to dict_sets
dict_sets.update({'i': pd.DataFrame(data=True, index=prop_g.index.get_level_values(1).unique().values,
columns=['Value'])})
static_data['feasops']['fuel_name'] = (static_data['feasops']['medea_type'] / 10).apply(np.floor) * 10
static_data['feasops']['fuel_name'].replace({y: x for x, y in dict_sets['f'].itertuples()}, inplace=True)
static_data['feasops']['set_element'] = static_data['feasops']['medea_type']
static_data['feasops']['set_element'].replace(
{x: y for x, y in static_data['tec'][['medea_type', 'set_element']].values}, inplace=True)
static_data['feasops'].dropna(inplace=True)
static_data['feasops'].set_index(['set_element', 'l', 'fuel_name'], inplace=True)
# following line produces memory error (0xC00000FD) --> workaround with element-wise division
# df_feasops['fuel_need'] = df_feasops['fuel']/ df_eff
# TODO: PerformanceWarning: indexing past lexsort depth may impact performance (3 times)
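# A possible fix for the lexsort warning below (assumption: row order of 'feasops' is not meaningful):
# static_data['feasops'] = static_data['feasops'].sort_index()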
static_data['feasops']['fuel_need'] = np.nan
for typ in static_data['feasops'].index.get_level_values(0).unique():
for lim in static_data['feasops'].index.get_level_values(1).unique():
static_data['feasops'].loc[idx[typ, lim], 'fuel_need'] = static_data['feasops'].loc[
idx[typ, lim], 'fuel'].mean() / \
dict_static['eta'][typ][0]
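# Vectorized alternative to the loop above (sketch; assumes every set_element has an entry in dict_static['eta']):
# eta = pd.Series({k: v[0] for k, v in dict_static['eta'].items()})
# grp_mean = static_data['feasops'].groupby(level=['set_element', 'l'])['fuel'].transform('mean')
# static_data['feasops']['fuel_need'] = grp_mean / static_data['feasops'].index.get_level_values('set_element').map(eta)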
# adjust static_data['tec'] to reflect modelled power plants
static_data['tec'].set_index('set_element', inplace=True)
static_data['tec'] = static_data['tec'].loc[static_data['tec'].index.isin(dict_sets['i'].index), :]
dict_instantiate['efficiency'] = \
dict_instantiate['efficiency'].loc[
dict_instantiate['efficiency'].index.get_level_values(0).isin(dict_sets['i'].index), :]
static_data['feasops'] = \
static_data['feasops'].loc[static_data['feasops'].index.get_level_values(0).isin(dict_sets['i'].index), :]
# --------------------------------------------------------------------------- #
# hydro storage data
# drop all ror data
plant_data['hydro'].drop(plant_data['hydro'][plant_data['hydro'].technology == 'Run-of-river'].index, inplace=True)
# filter out data without reservoir size in GWh
plant_data['hydro'].dropna(subset=['energy_max', 'power_in'], inplace=True)
# calculate duration of generation from full reservoir
plant_data['hydro']['max_duration'] = plant_data['hydro']['energy_max'] / plant_data['hydro']['power_out'] * 1000 / 24
plant_data['hydro']['count'] = 1
plant_data.update({'hydro_clusters': plant_data['hydro'].groupby(['technology', 'country',
pd.cut(plant_data['hydro']['max_duration'],
[0, 2, 7, 75])]).sum()})
plant_data['hydro_clusters']['efficiency_in'] = plant_data['hydro_clusters']['efficiency_in'] / \
plant_data['hydro_clusters']['count']
plant_data['hydro_clusters']['efficiency_out'] = plant_data['hydro_clusters']['efficiency_out'] / \
plant_data['hydro_clusters']['count']
plant_data['hydro_clusters']['cost_power'] = np.nan
plant_data['hydro_clusters']['cost_energy'] = np.nan
# assign technology and zone index to rows
plant_data['hydro_clusters']['country'] = plant_data['hydro_clusters'].index.get_level_values(1)
plant_data['hydro_clusters']['category'] = plant_data['hydro_clusters'].index.get_level_values(2).rename_categories(
['day', 'week', 'season']).astype(str)
plant_data['hydro_clusters']['tech'] = plant_data['hydro_clusters'].index.get_level_values(0)
plant_data['hydro_clusters']['tech'] = plant_data['hydro_clusters']['tech'].replace(['Pumped Storage', 'Reservoir'],
['psp', 'res'])
plant_data['hydro_clusters']['set_elem'] = plant_data['hydro_clusters']['tech'] + '_' + plant_data['hydro_clusters'][
'category']
plant_data['hydro_clusters'] = plant_data['hydro_clusters'].set_index(['set_elem', 'country'])
plant_data['hydro_clusters'].fillna(0, inplace=True)
plant_data['hydro_clusters']['power_out'] = plant_data['hydro_clusters']['power_out'] / 1000 # conversion from MW to GW
plant_data['hydro_clusters']['power_in'] = plant_data['hydro_clusters']['power_in'] / 1000 # conversion from MW to GW
plant_data['hydro_clusters']['inflow_factor'] = (
plant_data['hydro_clusters']['energy_max'] / plant_data['hydro_clusters']['energy_max'].sum())
plant_data['hydro_clusters'] = plant_data['hydro_clusters'].loc[:, ['power_in', 'power_out', 'energy_max',
'efficiency_in', 'efficiency_out', 'cost_power',
'cost_energy', 'inflow_factor']].copy()
# append battery data
bat_idx = pd.MultiIndex.from_product([['battery'], list(cfg.zones)])
df_battery = pd.DataFrame(np.nan, bat_idx, dict_additions['batteries'].keys())
for zone in list(cfg.zones):
for key in dict_additions['batteries'].keys():
df_battery.loc[('battery', zone), key] = dict_additions['batteries'][key][0]
plant_data['storage_clusters'] = plant_data['hydro_clusters'].append(df_battery)
# --------------------------------------------------------------------------- #
# %% process time series data
# --------------------------------------------------------------------------- #
ts_data['timeseries']['DateTime'] = pd.to_datetime(ts_data['timeseries']['DateTime'])
ts_data['timeseries'].set_index('DateTime', inplace=True)
# constrain data to scenario year
ts_data['timeseries'] = ts_data['timeseries'].loc[
(pd.Timestamp(cfg.year, 1, 1, 0, 0).tz_localize('UTC') <= ts_data['timeseries'].index) & (
ts_data['timeseries'].index <= pd.Timestamp(cfg.year, 12, 31, 23, 0).tz_localize('UTC'))]
# replace the DateTime index by the model's hourly time set index (t1 ... tN)
if len(ts_data['timeseries']) == len(dict_sets['t']):
ts_data['timeseries'].set_index(dict_sets['t'].index, inplace=True)
else:
raise ValueError('Mismatch of time series data and model time resolution. Is cfg.year wrong?')
ts_data['timeseries']['DE-power-load'] = ts_data['timeseries']['DE-power-load'] / 0.91
# for 0.91 scaling factor see
# https://www.entsoe.eu/fileadmin/user_upload/_library/publications/ce/Load_and_Consumption_Data.pdf
# create price time series incl transport cost
ts_data['timeseries']['Nuclear'] = 3.5
ts_data['timeseries']['Lignite'] = 4.5
ts_data['timeseries']['Biomass'] = 6.5
# subset of zonal time series
ts_data['zonal'] = ts_data['timeseries'].loc[:, ts_data['timeseries'].columns.str.startswith(('AT', 'DE'))].copy()
ts_data['zonal'].columns = ts_data['zonal'].columns.str.split('-', expand=True)
# adjust column naming to reflect proper product names ('el' and 'ht')
ts_data['zonal'] = ts_data['zonal'].rename(columns={'power': 'el', 'heat': 'ht'})
model_prices = ['Coal', 'Oil', 'Gas', 'EUA', 'Nuclear', 'Lignite', 'Biomass', 'price_day_ahead']
ts_data['price'] = pd.DataFrame(index=ts_data['timeseries'].index,
columns=pd.MultiIndex.from_product([model_prices, cfg.zones]))
for zone in cfg.zones:
for fuel in model_prices:
if fuel in static_data['cost_transport'].index:
ts_data['price'][(fuel, zone)] = ts_data['timeseries'][fuel] + static_data['cost_transport'].loc[fuel, zone]
else:
ts_data['price'][(fuel, zone)] = ts_data['timeseries'][fuel]
ts_inflows = pd.DataFrame(index=list(ts_data['zonal'].index),
columns=pd.MultiIndex.from_product([cfg.zones, dict_sets['k'].index]))
for zone in list(cfg.zones):
for strg in dict_sets['k'].index:
if 'battery' not in strg:
ts_inflows.loc[:, (zone, strg)] = ts_data['zonal'].loc[:, idx[zone, 'inflows', 'reservoir']] * \
plant_data['storage_clusters'].loc[(strg, zone), 'inflow_factor']
ts_data.update({'inflows': ts_inflows})
dict_instantiate.update({'ancil': ts_data['zonal'].loc[:, idx[:, 'el', 'load']].max().unstack((1, 2)).squeeze() * 0.125
+ dict_instantiate['CAP_R'].unstack(1).drop('ror', axis=1).sum(axis=1) * 0.075})
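# The 'ancil' entry above approximates ancillary-service requirements as 12.5 % of zonal peak
# electricity load plus 7.5 % of installed intermittent capacity excluding run-of-river.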
dict_instantiate.update({'PEAK_LOAD': ts_data['zonal'].loc[:, idx[:, 'el', 'load']].max().unstack((1, 2)).squeeze()})
dict_instantiate.update({'PEAK_PROFILE': ts_data['zonal'].loc[:, idx[:, :, 'profile']].max().unstack(2).drop(
'ror', axis=0, level=1)})
# drop rows with all zeros
plant_data['storage_clusters'] = \
plant_data['storage_clusters'].loc[~(plant_data['storage_clusters'] == 0).all(axis=1), :].copy()
# --------------------------------------------------------------------------- #
# %% limits on investment - long-run vs short-run (TODO: potentials)
# --------------------------------------------------------------------------- #
invest_limits = {}
lim_invest_thermal = pd.DataFrame([0])
if cfg.invest_conventionals:
lim_invest_thermal = pd.DataFrame([float('inf')])
invest_limits.update({'thermal': lim_invest_thermal})
# dimension lim_invest_itm[r, tec_itm]
lim_invest_itm = pd.DataFrame(data=0, index=cfg.zones, columns=dict_sets['n'].index)
if cfg.invest_renewables:
for zone in cfg.zones:
for itm in lim_invest_itm.columns:
lim_invest_itm.loc[zone, itm] = float(static_data['potentials'].loc[itm, zone])
invest_limits.update({'intermittent': lim_invest_itm})
# dimension lim_invest_storage[r, tec_strg]
lim_invest_storage = | pd.DataFrame(data=0, index=cfg.zones, columns=dict_sets['k'].index) | pandas.DataFrame |
from english_words import english_words_set
import pandas as pd
import numpy as np
# make a list of 5-letter words that are purely alphabetic, lower-case and not proper nouns
words5 = []
for word in english_words_set:
if len(word) == 5 and word[0].islower() and word.isalpha():
words5.append(word)
df_words = pd.DataFrame(words5)
df_words.columns=['words']
df_words['first'] = df_words['words'].str.slice(0,1)
df_words['second'] = df_words['words'].str.slice(1,2)
df_words['third'] = df_words['words'].str.slice(2,3)
df_words['fourth'] = df_words['words'].str.slice(3,4)
df_words['fifth'] = df_words['words'].str.slice(4,5)
alphas = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
dfalpha = pd.DataFrame(alphas)
dfalpha.columns = ['letters']
dfalpha.set_index('letters', inplace=True)
# build the frequency table from each smaller dataframe
df_counts = df_words['first'].value_counts()
df_counts_second = df_words['second'].value_counts()
df_counts_third = df_words['third'].value_counts()
df_counts_fourth = df_words['fourth'].value_counts()
df_counts_fifth = df_words['fifth'].value_counts()
dfranks = pd.concat([dfalpha, df_counts], axis=1)
dfranks = pd.concat([dfranks, df_counts_second], axis=1)
dfranks = pd.concat([dfranks, df_counts_third], axis=1)
dfranks = pd.concat([dfranks, df_counts_fourth], axis=1)
dfranks = pd.concat([dfranks, df_counts_fifth], axis=1)
def freq_ranking(word):
val = dfranks['first'][word[0]] + dfranks['second'][word[1]] +dfranks['third'][word[2]] + dfranks['fourth'][word[3]] +dfranks['fifth'][word[4]]
return val
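# Hedged usage sketch: rank every candidate word by its positional letter frequencies
# ranked = sorted(words5, key=freq_ranking, reverse=True)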
df_word_ranks = | pd.DataFrame({'word' : [],'value':[]}) | pandas.DataFrame |
# Let's start off by loading in Jeff's CDR3's
import numpy as np
import pandas
def getBunker():
total_Abs=pandas.read_csv('app_data/mouse_IgA.dat',sep='\s+',header=None,names=['cdrL1_aa','cdrL2_aa','cdrL3_aa','cdrH1_aa','cdrH2_aa','cdrH3_aa','react'])
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
mono_all=total_abs7[total_abs7['react'].isin([0.0,1.0])].values
poly_all=total_abs7[total_abs7['react'].isin([2.0,3.0,4.0,5.0,6.0,7.0])].values
mono=total_abs7[total_abs7['react'].isin([0.0])].values
poly=total_abs7[total_abs7['react'].isin([5.0,6.0,7.0])].values
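    # Drop antibodies for which any of the six CDR sequences is an empty string;
    # np.vstack accumulates the row indices to delete.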
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
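# Hedged usage sketch (assumes 'app_data/mouse_IgA.dat' is present):
# mono_all, poly_all, mono, poly = getBunker()
# print(mono_all.shape, poly_all.shape)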
#####################################################################################
def getJenna():
total_Abs=pandas.read_csv('app_data/flu_IgG.dat',sep='\s+',header=None,
names=['cdrL1_aa','cdrL2_aa','cdrL3_aa','cdrH1_aa','cdrH2_aa','cdrH3_aa','react'])
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
# Having this and the above lines as "if" options could make this loader more generalizable...
mono_all=total_abs7[total_abs7['react'].isin([0,1])].values
poly_all=total_abs7[total_abs7['react'].isin([2,3,4,5,6,7])].values
mono=total_abs7[total_abs7['react'].isin([0])].values
poly=total_abs7[total_abs7['react'].isin([5,6,7])].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
def getHugo():
my_heavy=pandas.read_csv('app_data/hiv_igg_data/gut_heavy_aa.dat',sep='\s+')
my_light=pandas.read_csv('app_data/hiv_igg_data/gut_light_aa.dat',sep='\s+')
poly_YN=pandas.read_csv('app_data/hiv_igg_data/gut_num_react.dat',sep='\s+',header=None,names=['react'])
total_abs= | pandas.concat([my_light,my_heavy,poly_YN],axis=1) | pandas.concat |
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import power_transform
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import os
import datetime
import numpy as np
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer  # Imputer was moved out of sklearn.preprocessing in newer scikit-learn
#Garbage Collector
import gc
os.getcwd()
os.chdir('C:/Users/Mann-A2/Documents/Python Repository/IEEE Fraud Detection - Kaggle/ieee-fraud-detection')
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
train_identity = pd.read_csv('train_identity.csv')
train_transaction = pd.read_csv('train_transaction.csv')
test_identity = pd.read_csv('test_identity.csv')
test_transaction = pd.read_csv('test_transaction.csv')
# function to reduce size
def reduce_mem_usage(df):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
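# reduce_mem_usage downcasts each numeric column to the smallest dtype that holds its observed
# range and converts object columns to 'category'; it is applied to the merged frames in the
# model-building step below.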
#split function
def id_split(dataframe):
dataframe['device_name'] = dataframe['DeviceInfo'].str.split('/', expand=True)[0]
dataframe['device_version'] = dataframe['DeviceInfo'].str.split('/', expand=True)[1]
dataframe['OS_id_30'] = dataframe['id_30'].str.split(' ', expand=True)[0]
dataframe['version_id_30'] = dataframe['id_30'].str.split(' ', expand=True)[1]
dataframe['browser_id_31'] = dataframe['id_31'].str.split(' ', expand=True)[0]
dataframe['version_id_31'] = dataframe['id_31'].str.split(' ', expand=True)[1]
dataframe['screen_width'] = dataframe['id_33'].str.split('x', expand=True)[0]
dataframe['screen_height'] = dataframe['id_33'].str.split('x', expand=True)[1]
dataframe['id_34'] = dataframe['id_34'].str.split(':', expand=True)[1]
dataframe['id_23'] = dataframe['id_23'].str.split(':', expand=True)[1]
dataframe.loc[dataframe['device_name'].str.contains('SM', na=False), 'device_name'] = 'Samsung'
dataframe.loc[dataframe['device_name'].str.contains('SAMSUNG', na=False), 'device_name'] = 'Samsung'
dataframe.loc[dataframe['device_name'].str.contains('GT-', na=False), 'device_name'] = 'Samsung'
dataframe.loc[dataframe['device_name'].str.contains('Moto G', na=False), 'device_name'] = 'Motorola'
dataframe.loc[dataframe['device_name'].str.contains('Moto', na=False), 'device_name'] = 'Motorola'
dataframe.loc[dataframe['device_name'].str.contains('moto', na=False), 'device_name'] = 'Motorola'
dataframe.loc[dataframe['device_name'].str.contains('LG-', na=False), 'device_name'] = 'LG'
dataframe.loc[dataframe['device_name'].str.contains('rv:', na=False), 'device_name'] = 'RV'
dataframe.loc[dataframe['device_name'].str.contains('HUAWEI', na=False), 'device_name'] = 'Huawei'
dataframe.loc[dataframe['device_name'].str.contains('ALE-', na=False), 'device_name'] = 'Huawei'
dataframe.loc[dataframe['device_name'].str.contains('-L', na=False), 'device_name'] = 'Huawei'
dataframe.loc[dataframe['device_name'].str.contains('Blade', na=False), 'device_name'] = 'ZTE'
dataframe.loc[dataframe['device_name'].str.contains('BLADE', na=False), 'device_name'] = 'ZTE'
dataframe.loc[dataframe['device_name'].str.contains('Linux', na=False), 'device_name'] = 'Linux'
dataframe.loc[dataframe['device_name'].str.contains('XT', na=False), 'device_name'] = 'Sony'
dataframe.loc[dataframe['device_name'].str.contains('HTC', na=False), 'device_name'] = 'HTC'
dataframe.loc[dataframe['device_name'].str.contains('ASUS', na=False), 'device_name'] = 'Asus'
dataframe.loc[dataframe.device_name.isin(dataframe.device_name.value_counts()[dataframe.device_name.value_counts() < 200].index), 'device_name'] = "Others"
dataframe['had_id'] = 1
gc.collect()
return dataframe
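# id_split parses DeviceInfo / id_30 / id_31 / id_33 into device, OS and browser name/version
# columns, normalises vendor names, and flags rows that carried identity data ('had_id').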
train_identity = id_split(train_identity)
test_identity = id_split(test_identity)
#Data joining
train = pd.merge(train_transaction, train_identity, on='TransactionID', how='left', left_index=True, right_index=True)
test = pd.merge(test_transaction, test_identity, on='TransactionID', how='left', left_index=True, right_index=True)
print('Data was successfully merged!\n')
del train_identity, train_transaction, test_identity, test_transaction
print(f'Train dataset has {train.shape[0]} rows and {train.shape[1]} columns.')
print(f'Test dataset has {test.shape[0]} rows and {test.shape[1]} columns.\n')
#============================================================================
useful_features = ['isFraud', 'TransactionDT', 'TransactionAmt', 'ProductCD', 'card1', 'card2', 'card3', 'card4', 'card5', 'card6', 'addr1', 'addr2', 'dist1',
'P_emaildomain', 'R_emaildomain', 'C1', 'C2', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13',
'C14', 'D1', 'D2', 'D3', 'D4', 'D5', 'D10', 'D11', 'D15', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9',
'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V17',
'V19', 'V20', 'V29', 'V30', 'V33', 'V34', 'V35', 'V36', 'V37', 'V38', 'V40', 'V44', 'V45', 'V46', 'V47', 'V48',
'V49', 'V51', 'V52', 'V53', 'V54', 'V56', 'V58', 'V59', 'V60', 'V61', 'V62', 'V63', 'V64', 'V69', 'V70', 'V71',
'V72', 'V73', 'V74', 'V75', 'V76', 'V78', 'V80', 'V81', 'V82', 'V83', 'V84', 'V85', 'V87', 'V90', 'V91', 'V92',
'V93', 'V94', 'V95', 'V96', 'V97', 'V99', 'V100', 'V126', 'V127', 'V128', 'V130', 'V131', 'V138', 'V139', 'V140',
'V143', 'V145', 'V146', 'V147', 'V149', 'V150', 'V151', 'V152', 'V154', 'V156', 'V158', 'V159', 'V160', 'V161',
'V162', 'V163', 'V164', 'V165', 'V166', 'V167', 'V169', 'V170', 'V171', 'V172', 'V173', 'V175', 'V176', 'V177',
'V178', 'V180', 'V182', 'V184', 'V187', 'V188', 'V189', 'V195', 'V197', 'V200', 'V201', 'V202', 'V203', 'V204',
'V205', 'V206', 'V207', 'V208', 'V209', 'V210', 'V212', 'V213', 'V214', 'V215', 'V216', 'V217', 'V219', 'V220',
'V221', 'V222', 'V223', 'V224', 'V225', 'V226', 'V227', 'V228', 'V229', 'V231', 'V233', 'V234', 'V238', 'V239',
'V242', 'V243', 'V244', 'V245', 'V246', 'V247', 'V249', 'V251', 'V253', 'V256', 'V257', 'V258', 'V259', 'V261',
'V262', 'V263', 'V264', 'V265', 'V266', 'V267', 'V268', 'V270', 'V271', 'V272', 'V273', 'V274', 'V275', 'V276',
'V277', 'V278', 'V279', 'V280', 'V282', 'V283', 'V285', 'V287', 'V288', 'V289', 'V291', 'V292', 'V294', 'V303',
'V304', 'V306', 'V307', 'V308', 'V310', 'V312', 'V313', 'V314', 'V315', 'V317', 'V322', 'V323', 'V324', 'V326',
'V329', 'V331', 'V332', 'V333', 'V335', 'V336', 'V338', 'id_01', 'id_02', 'id_05', 'id_06',
'id_11', 'id_12', 'id_13', 'id_15', 'id_17', 'id_19', 'id_20', 'id_31', 'id_36', 'id_37', 'id_38', 'DeviceType',
'DeviceInfo', 'device_name', 'device_version', 'OS_id_30', 'version_id_30',
'browser_id_31', 'version_id_31', 'screen_width', 'screen_height', 'had_id']
cols_to_drop = [col for col in train.columns if col not in useful_features]
train = train.drop(cols_to_drop, axis=1)
test = test.drop(cols_to_drop, axis=1)
#Merging email columns
train.P_emaildomain.fillna(train.R_emaildomain, inplace=True)
del train['R_emaildomain']
test.P_emaildomain.fillna(test.R_emaildomain, inplace=True)
del test['R_emaildomain']
# New feature - log of the transaction amount.
train['TransactionAmt_Log'] = np.log(train['TransactionAmt'])
test['TransactionAmt_Log'] = np.log(test['TransactionAmt'])
# New feature - decimal part of the transaction amount.
train['TransactionAmt_decimal'] = ((train['TransactionAmt'] - train['TransactionAmt'].astype(int)) * 1000).astype(int)
test['TransactionAmt_decimal'] = ((test['TransactionAmt'] - test['TransactionAmt'].astype(int)) * 1000).astype(int)
# New feature - day of week in which a transaction happened.
train['Transaction_day_of_week'] = np.floor((train['TransactionDT'] / (3600 * 24) - 1) % 7)
test['Transaction_day_of_week'] = np.floor((test['TransactionDT'] / (3600 * 24) - 1) % 7)
# New feature - hour of the day in which a transaction happened.
train['Transaction_hour'] = np.floor(train['TransactionDT'] / 3600) % 24
test['Transaction_hour'] = np.floor(test['TransactionDT'] / 3600) % 24
del train['TransactionAmt'], train['TransactionDT']
del test['TransactionAmt'], test['TransactionDT']
#handling missing values -- replacing with -999
train.replace(np.nan, -999, inplace=True)
test.replace(np.nan, -999, inplace=True)
train.isnull().sum()
test.isnull().sum()
#=====================
# Optional: drop columns that are mostly missing. Keep only columns whose share of nulls
# is below a threshold (e.g. 0.8) via boolean indexing on the columns:
# df = df.loc[:, df.isnull().mean() < .8]
#Label Encoding
# Encoding - count encoding for both train and test
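# (count encoding maps each category to its frequency; computing the counts on the concatenated
#  train+test values keeps the mapping consistent across both sets)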
for feature in ['card1', 'card2', 'card3', 'card4', 'card5', 'card6', 'id_36']:
train[feature + '_count_full'] = train[feature].map(pd.concat([train[feature], test[feature]], ignore_index=True).value_counts(dropna=False))
test[feature + '_count_full'] = test[feature].map(pd.concat([train[feature], test[feature]], ignore_index=True).value_counts(dropna=False))
# Encoding - count encoding separately for train and test
for feature in ['id_01', 'id_31', 'id_36']:
train[feature + '_count_dist'] = train[feature].map(train[feature].value_counts(dropna=False))
test[feature + '_count_dist'] = test[feature].map(test[feature].value_counts(dropna=False))
for col in train.columns:
if train[col].dtype == 'object':
le = LabelEncoder()
le.fit(list(train[col].astype(str).values) + list(test[col].astype(str).values))
train[col] = le.transform(list(train[col].astype(str).values))
test[col] = le.transform(list(test[col].astype(str).values))
train.to_csv('training_set.csv', index = None, header=True)
test.to_csv('testing_set.csv', index = None, header=True)
print("\nData successfully prepared")
#=======================================================================================================================
#Model Building
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import power_transform
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from sklearn.metrics import cohen_kappa_score, make_scorer
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
from pystacknet.pystacknet import StackNetClassifier
#pip install lightgbm
#pip install catboost
#pip install pystacknet-master
import matplotlib.pyplot as plt
import os
import datetime
import numpy as np
import seaborn as sns
os.getcwd()
os.chdir('C:/Users/Mann-A2/Documents/Python Repository/IEEE Fraud Detection - Kaggle/ieee-fraud-detection - worked')
train = pd.read_csv('training_set.csv')
test = pd.read_csv('testing_set.csv')
train = reduce_mem_usage(train)
test = reduce_mem_usage(test)
X_train = train.drop(['isFraud'], axis=1)
y_train = train['isFraud']
X_test = test
#LR #0.8052
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(random_state=123,solver = 'liblinear')
clf.fit(X_train, y_train)
y_pred = clf.predict_proba(X_test)
pd.DataFrame(y_pred, columns=['predictions','isFraud']).to_csv('prediction LR.csv')
#RF #0.9119
from sklearn.ensemble import RandomForestRegressor
clf = RandomForestRegressor(
n_estimators=200, max_features=0.4, min_samples_split=50,
min_samples_leaf=100, n_jobs=-1, verbose=2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
pd.DataFrame(y_pred).to_csv('prediction RF.csv')
#Simple -- XGB #0.9342
clf = XGBClassifier(n_estimators=500,
n_jobs=4,
max_depth=9,
learning_rate=0.05,
subsample=0.9,
colsample_bytree=0.9,
missing=-999)
clf.fit(X_train, y_train)
y_pred = clf.predict_proba(X_test)
pd.DataFrame(y_pred, columns=['predictions','isFraud']).to_csv('prediction XGB.csv')
# ===== StackNet Model: base classifiers ==============
# LGBMClassifier without GPU
clf_lgb = LGBMClassifier(
max_bin=63,
num_leaves=255,
num_iterations=1000,
learning_rate=0.01,
tree_learner="serial",
task="train",
is_training_metric=False,
min_data_in_leaf=1,
min_sum_hessian_in_leaf=100,
sparse_threshold=1.0,
num_thread=-1,
save_binary=True,
seed=42,
feature_fraction_seed=42,
bagging_seed=42,
drop_seed=42,
data_random_seed=42,
objective="binary",
boosting_type="gbdt",
verbose=1,
metric="auc",
is_unbalance=True,
boost_from_average=False,
)
clf_lgb.fit(X_train, y_train)
y_pred = clf_lgb.predict_proba(X_test)
pd.DataFrame(y_pred, columns=['predictions','isFraud']).to_csv('prediction LGBMClassifier.csv')
# XGBClassifier without GPU
clf_xgb = XGBClassifier(
n_estimators=1000,
max_depth=9,
learning_rate=0.05,
subsample=0.9,
colsample_bytree=0.9,
missing=-999,
n_jobs=-1,
random_state=42,
)
clf_xgb.fit(X_train, y_train)
y_pred = clf_xgb.predict_proba(X_test)
pd.DataFrame(y_pred, columns=['predictions','isFraud']).to_csv('prediction XGBClassifier.csv')
# CatBoostClassifier without GPU
param_cb = {
'learning_rate': 0.2,
'bagging_temperature': 0.1,
'l2_leaf_reg': 30,
'depth': 12,
'max_bin':255,
'iterations' : 1000,
'loss_function' : "Logloss",
'objective':'CrossEntropy',
'eval_metric' : "AUC",
'bootstrap_type' : 'Bayesian',
'random_seed':42,
'early_stopping_rounds' : 100,
}
clf_ctb = CatBoostClassifier(silent=True, **param_cb)
clf_ctb.fit(X_train, y_train)
y_pred = clf_ctb.predict_proba(X_test)
| pd.DataFrame(y_pred, columns=['predictions','isFraud']) | pandas.DataFrame |
def get_files_in_path(path, ext="wav"):
"""
Get files in a path
exampe : files = get_files_in_path("./audioFiles")
"""
import os, glob
path = os.path.join(path, "*."+ext)
theFiles = glob.glob(path, recursive=True)
return theFiles
def find_last_slash_pos_in_path(path):
"""
Find last slash position in a path
exampe : files = find_last_slash_pos_in_path("./audioFiles/abc.wav")
output : integer
the value that is the position of the last slash
"""
import os
LastSlashPos = path.rfind(os.path.split(path)[-1]) - 1
return LastSlashPos
def search_csv(csv_file, search_term, colomn_searched, colomn_out):
'''
Search a string in a csv file and a colomn and get it's corresponding value for a different colomn.
example : valenz = search_csv('labels-sorted.csv', '001_01.wav', 'Laufnummer', 'Valenz')
'''
import pandas as pd
df = pd.read_csv(csv_file)
out = df[df[colomn_searched] == search_term][colomn_out]
ret = out.values
if len(ret) == 1:
return ret[0]
else:
return -1
def writeLineToCSV(csvPath, headers, values):
'''
Write one line to CSV
example : writeLineToCSV("test.csv", ["a", "b", "c"], ["something",16,34])
'''
import pandas as pd
import os
LastSlashPos = csvPath.rfind(os.path.split(csvPath)[-1]) - 1
if not os.path.exists(csvPath[:LastSlashPos]): os.makedirs(csvPath[:LastSlashPos])
dic = {}
for i, header in enumerate(headers): dic[header] = values[i]
data = [dic]
if os.path.exists(csvPath):
df = pd.read_csv(csvPath)
df = df.append(data, ignore_index=True, sort=False)
else:
df = pd.DataFrame(data, columns = headers)
df.to_csv(csvPath, index=False)
def arff2csv(arff_path, csv_path=None, _encoding='utf8'):
"""
This function was copied from https://github.com/Hutdris/arff2csv/blob/master/arff2csv.py
It turns .arff files into csvs.
"""
with open(arff_path, 'r', encoding=_encoding) as fr:
attributes = []
if csv_path is None:
csv_path = arff_path[:-4] + 'csv' # *.arff -> *.csv
write_sw = False
with open(csv_path, 'w', encoding=_encoding) as fw:
for line in fr.readlines():
if write_sw:
if line == "": print("emp")
fw.write(line)
elif '@data' in line:
fw.write(','.join(attributes) + '\n')
write_sw = True
elif '@attribute' in line:
attributes.append(line.split()[1]) # @attribute attribute_tag numeric
print("Convert {} to {}.".format(arff_path, csv_path))
def divide_list(list, perc=0.5):
"""
Divide a list into two new lists. perc is the first list's share. If perc=0.6 then the first new list will have 60 percent of the original list.
example : f,s = divide_list([1,2,3,4,5,6,7], perc=0.7)
"""
origLen = len(list)
lim = int(perc*origLen)
firstList = list[:lim]
secondList = list[lim:]
return firstList, secondList
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = "fit", fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
import os
rows, columns = os.popen('stty size', 'r').read().split()
if length=="fit": length = int(columns) // 2
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
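# Hedged usage sketch ('items' and 'process' are placeholders):
# items = list(range(100))
# for i, item in enumerate(items, start=1):
#     process(item)
#     printProgressBar(i, len(items), prefix='Progress:', suffix='Complete')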
def csvReader(fullPath, headers, standardize=False):
import pandas as pd
import numpy as np
# print(fullPath)
df = | pd.read_csv(fullPath) | pandas.read_csv |
'''
OOPitch brings Object-Oriented programming to football analytics. It is based on the most common data-analysis
libraries -- numpy (scipy), pandas and matplotlib -- and it extends the computational geometry library shapely
to account for the necessities of football analytics.
'''
import numpy as np
from scipy.signal import savgol_filter
from shapely.geometry import Polygon, LineString, MultiPoint, MultiPolygon
import shapely.wkt as wkt
from shapely_football import Point, SubPitch
# We use geopandas most as a plotter. This will go away in more mature versions
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.stats import norm
import pickle as pkl
from copy import copy
meters_per_yard = 0.9144 # unit conversion from yards to meters
class ObejctOnPitch:
'''
Base class. Define main attributes and methods for ball and players
'''
def __init__(self, id, positions=None, hertz=None, smoothing=True, filter_='Savitzky-Golay', window_length=7,
max_speed=12, **kwargs):
self.__id = id
# Set positions
if positions is not None:
if hertz is None: raise ValueError("If positions is specified, you need to indicate hertz as well.")
self.create_positions(positions)
self.calculate_velocities(hertz, smoothing=smoothing, filter_=filter_, window_length=window_length,
max_speed=max_speed, **kwargs)
else:
self.__positions = np.nan
self.__velocity = np.nan
self.__smoothing = None
self.__filter_par = None
self.__total_distance = np.nan
# Define state methods for pickling
def __getstate__(self):
state = copy(self.__dict__)
# Pickling geo-series is impossible. we get around it by saving the geo-series in a wkt format
if self.__positions is not None:
# Fill nas, otherwise we trigger an error when creating pos
pos = self.__positions.copy().fillna(value=Point([-999, -999]))
# Save positions as wkt
pos = MultiPoint(pos.geometry.to_list())
# We save frames for reference
state['_ObejctOnPitch__frames'] = self.__positions.index.values
state['_ObejctOnPitch__positions'] = pos.wkt
else:
state['_ObejctOnPitch__frames'] = None
return state
def __setstate__(self, state):
# Load positions from wkt if necessary
if state['_ObejctOnPitch__positions'] is not None:
pos = wkt.loads(state['_ObejctOnPitch__positions'])
pos = gpd.GeoSeries([Point(p) for p in pos.geoms])
pos.loc[pos == Point([-999, -999])] = None
pos.index = state['_ObejctOnPitch__frames']
state['_ObejctOnPitch__positions'] = pos
del state['_ObejctOnPitch__frames']
self.__dict__.update(state)
def calculate_velocities(self, hertz, smoothing=True, filter_='Savitzky-Golay', window_length=7,
max_speed=12, **kwargs):
'''
Calculate velocities of an object based on a GeoSeries of Positions.
:param hertz: The tracking frequency per second
        :param max_speed: Max speed (m/s) above which a position is treated as a tracking error
        :param smoothing: Should a smoothing be applied?
        :param filter_: If a smoothing is applied, which filter should be used? Either 'Savitzky-Golay' or 'moving_average'
        :param window_length: Window length passed to the smoothing filter
:param kwargs: Arguments passed to the filter.
'''
if np.all(pd.isna(self.__positions)):
print("No valid positions to calculate velocity")
return None
self.__velocity_par = {'filter': None, 'hertz': hertz, 'max_speed': max_speed}
# Velocity is a Dataframe containing the x,y components as two columns
velocity_x = (self.__positions.x - self.__positions.shift(-1).x) * hertz
# Last point has no velocity
velocity_x.loc[self.__positions.index[-1]] = np.nan
velocity_y = (self.__positions.y - self.__positions.shift(-1).y) * hertz
# Last point has no velocity
velocity_y.loc[self.__positions.index[-1]] = np.nan
velocity = pd.DataFrame(np.array([velocity_x, velocity_y], dtype=np.float32).T)
velocity = velocity.rename(columns={0: 'x', 1: 'y'})
velocity['speed'] = np.linalg.norm(velocity.to_numpy(), axis=1)
self.__smoothing = False
# remove unsmoothed data points that exceed the maximum speed (these are most likely position errors)
if max_speed is not None and max_speed > 0:
velocity.loc[velocity['speed'] > max_speed, ['x', 'y', 'speed']] = np.nan
if smoothing:
if filter_ == 'Savitzky-Golay':
self.__velocity_par['filter'] = 'Savitzky-Golay'
self.__smoothing = savgol_filter
# This works as default
if 'polyorder' not in kwargs.keys():
kwargs['polyorder'] = 1
# save for later
kwargs['window_length'] = window_length
velocity['x'] = savgol_filter(velocity['x'], **kwargs)
velocity['y'] = savgol_filter(velocity['y'], **kwargs)
elif filter_ == 'moving_average':
self.__velocity_par['filter'] = 'moving_average'
self.__smoothing = pd.DataFrame.rolling
# save for later
kwargs['window'] = window_length
if 'min_periods' not in kwargs:
kwargs['min_periods'] = 1
if 'center' not in kwargs:
kwargs['center'] = True
velocity['x'] = velocity['x'].rolling(**kwargs).mean()
velocity['y'] = velocity['y'].rolling(**kwargs).mean()
else:
raise NotImplementedError("Savitzky-Golay and moving_average are the only options for smoothing.")
self.__filter_par = kwargs
# After filtering, recalculate total speed
velocity['speed'] = np.linalg.norm(velocity[['x', 'y']].to_numpy(), axis=1)
velocity.index = self.__positions.index
self.__velocity = velocity
def create_positions(self, positions):
'''
        Register and save the positions of the object during the match as a GeoSeries.
        Velocity estimates are computed separately by calculate_velocities.
:param positions: A Series containing Points
'''
# A geoseries actually speeds up some operations
self.__positions = gpd.GeoSeries(positions)
        # It turns out GeoSeries transforms np.nan into None
self.__positions.loc[self.__positions.isnull()] = np.nan
def correct_speed(self, halves, hertz):
'''
        calculate_velocities does not take into account the break between periods within a game. This means that
        there will be aberrations in the speed estimates immediately after the second period (or extra time) starts.
        This function corrects those aberrations.
        :param halves: The frames where new halves start
'''
if np.all(pd.isna(self.__velocity)):
print("No valid velocity. Have you called calculate_velocities")
return None
for half in halves:
            # Set the velocity of the last frame of a half to 0 for calculations
if self.__smoothing:
# There should be a window or window_length in the kwargs
try:
window = self.__filter_par['window_length']
self.__velocity.loc[(half - 1), ['x', 'y', 'speed']] = 0
except KeyError:
window = self.__filter_par['window']
self.__velocity.loc[(half - 1), ['x', 'y', 'speed']] = np.nan
# calculate the x, y components again for those frames that are affected
for col in ['x', 'y']:
before_half = np.array(range((half - 1 - window * 2), (half - 1)))
after_half = np.array(range(half, (half + window * 2)))
self.__velocity.loc[before_half, col] = (self.__positions.x.loc[before_half[1:]].to_numpy() -
self.__positions.x.loc[before_half[:-1]]) * hertz
self.__velocity.loc[before_half, col] = self.__smoothing(self.__velocity.loc[before_half, col],
**self.__filter_par).mean()
self.__velocity.loc[after_half, col] = (self.__positions.x.loc[after_half[1:]].to_numpy() -
self.__positions.x.loc[after_half[:-1]]) * hertz
self.__velocity.loc[after_half, col] = self.__smoothing(self.__velocity.loc[after_half, col],
**self.__filter_par).mean()
# Recalculate speed
self.__velocity.loc[(half - 1 - window * 2):(half + window * 2), 'speed'] = np.linalg.norm(
self.__velocity.loc[(half - 1 - window * 2):(half + window * 2), ['x', 'y']].to_numpy(), axis=1)
            # Set the velocity of the last frame of a half to nan
self.__velocity.loc[(half - 1), ['x', 'y', 'speed']] = np.nan
@property
def positions(self):
return self.__positions
@positions.setter
def positions(self, positions):
self.create_positions(positions)
kwargs = copy(self.__filter_par)
try:
window = kwargs['window_length']
del kwargs['window_length']
except KeyError:
window = kwargs['window']
del kwargs['window']
self.calculate_velocities(hertz=self.__velocity_par['hertz'], smoothing=self.__smoothing,
filter_=self.__velocity_par['filter'], max_speed=self.__velocity_par['max_speed'],
window_length=window, **kwargs)
@property
def id(self):
return (self.__id)
@property
def velocity(self):
return (self.__velocity)
@property
def total_distance(self):
'''
Total distance covered in the match
'''
if np.isnan(self.__total_distance):
self.__total_distance = LineString(self.positions.to_list()).length
return(self.__total_distance)
def plot(self, frames, pitch = None, figax = None, color='red', player_marker_size=10, player_alpha=0.7):
'''
Plot the positions of a player over a pitch. Return the used axis if needed
:param frames: Which frames should be plotted
:param pitch: A Pitch object, where the player is moving
:param figax: A figure, axis couple. This is an alternative to the pitch
:param color: The color of the player's marker
:param player_marker_size: How big the marker should be
        :param player_alpha: The alpha for the player marker
:return: The axis that has just been modified
'''
if pitch is None and figax is None:
raise AttributeError("Exactly one among pitch and figax must be specified")
if pitch is not None:
figax = pitch.plot()
fig, ax = figax
ax.text(self.__positions.loc[frames[0]].x + 0.5, self.__positions.loc[frames[0]].y + 0.5, self.__id,
fontsize=10, color=color)
ax = self.__positions.loc[frames].plot(ax=ax, color=color, markersize=player_marker_size, alpha=player_alpha)
return ax
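# Hedged usage sketch (assumes a GeoSeries of shapely Points indexed by frame number is available):
# ball = Ball(positions=ball_positions, hertz=25)
# ax = ball.plot(frames=list(range(0, 250)), pitch=Pitch())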
class Player(ObejctOnPitch):
'''
Define players and all their methods/attributes
'''
def __init__(self, player_id, team, number=None, positions=None, name=None, hertz=None,
smoothing=True, filter_='Savitzky-Golay', window_length=7, max_speed=12, **kwargs):
super().__init__(player_id, positions=positions, hertz=hertz, smoothing=smoothing,
filter_=filter_, window_length=window_length, max_speed=max_speed, **kwargs)
self.__team = team
# Set number
if number is not None:
self.__number = number
else:
self.__number = np.nan
# Set name
if name is not None:
self.__name = name
else:
self.__name = np.nan
#Without data from other players it is impossible to know if a player is a GK
self.__is_goalkeeper = np.nan
@property
def number(self):
return (self.__number)
@property
def name(self):
return self.__name
@property
def team(self):
return self.__team
@team.setter
def team(self, new_team):
self.__team = new_team
@property
def GK(self):
return self.__is_goalkeeper
@GK.setter
def GK(self, value):
        if isinstance(value, bool):
self.__is_goalkeeper = value
else:
raise TypeError("The value of Player.GK is either True or False.")
class Ball(ObejctOnPitch):
'''
Define the ball and its property
'''
def __init__(self, positions=None, hertz=None, smoothing=True, filter_='Savitzky-Golay', window_length=7,
max_speed=12, in_play=None, **kwargs):
super().__init__('ball', positions=positions, hertz=hertz, smoothing=smoothing,
filter_=filter_, window_length=window_length, max_speed=max_speed, **kwargs)
# DataFrame containing whether the ball is 'alive' or 'dead'
if in_play is not None:
assert self.positions is not None
assert in_play.shape[0] == super().positions.shape[0]
self.__in_play = in_play
@property
def in_play(self):
return self.__in_play
@in_play.setter
def in_play(self, in_play):
assert self.positions is not None
assert in_play.shape[0] == self.positions.shape[0]
self.__in_play = in_play
class Pitch:
'''
Define the Pitch where the game happens. Divide it in SubPitches for analysis sake.
We follow the convention of making the center of the field the 0,0 point and have negative values on the
bottom left (as a standard cartesian plane)
    pitch_dimen: iterable of length 2. The pitch dimensions (length, width), in meters.
    n_grid_cells_x: int. Regulates into how many SubPitch cells the Pitch is subdivided along its length.
'''
def __init__(self, pitch_dimen=(106.0, 68.0), n_grid_cells_x=None):
self.__dimension = pitch_dimen
# Create one polygon representing the entire pitch. May be helpful for spatial operation (like, is a player
# on the pitch? Is the ball in play?)
        self.__polygon = Polygon([(-self.__dimension[0] / 2, -self.__dimension[1] / 2),
                                  (self.__dimension[0] / 2, -self.__dimension[1] / 2),
                                  (self.__dimension[0] / 2, self.__dimension[1] / 2),
                                  (-self.__dimension[0] / 2, self.__dimension[1] / 2)])
# Create patches for the subpitch
if n_grid_cells_x is not None:
self.__n_grid_cells_x = np.int(n_grid_cells_x)
self.create_subpitch(self.__n_grid_cells_x)
else:
self.__n_grid_cells_x = None
self.__n_grid_cells_y = None
self.__subpitch = None
self.__sub_centroids = None
# Define state methods for pickling
def __getstate__(self):
state = copy(self.__dict__)
state['_Pitch__polygon'] = state['_Pitch__polygon'].wkt
# Pickling geo-series is impossible. we get around it by saving the geo-series in a wkt format
if state['_Pitch__n_grid_cells_x'] is not None:
# Save subpitches as wkt
state['_Pitch__subpitch_inds'] = state['_Pitch__subpitch'].index.values
subp = MultiPolygon(state['_Pitch__subpitch'].geometry.to_list())
state['_Pitch__subpitch'] = subp.wkt
# Save centroids as wkt
state['_Pitch__sub_centroids_inds'] = state['_Pitch__sub_centroids'].index.values
cents = MultiPoint( state['_Pitch__sub_centroids'].geometry.to_list() )
state['_Pitch__sub_centroids'] = cents.wkt
else:
state['_Pitch__sub_centroids_inds'] = None
state['_Pitch__subpitch_inds'] = None
return state
def __setstate__(self, state):
state['_Pitch__polygon'] = wkt.loads(state['_Pitch__polygon'])
# Load sub-pitches and their centroids from wkt if necessary
if state['_Pitch__subpitch_inds'] is not None:
subp = wkt.loads(state['_Pitch__subpitch'])
subp = gpd.GeoSeries([SubPitch(p) for p in subp.geoms])
subp.index = state['_Pitch__subpitch_inds']
state['_Pitch__subpitch'] = subp
cents = wkt.loads(state['_Pitch__sub_centroids'])
cents = gpd.GeoSeries([Point(p) for p in cents.geoms])
cents.index = state['_Pitch__sub_centroids_inds']
state['_Pitch__sub_centroids'] = cents
del state['_Pitch__subpitch_inds']
del state['_Pitch__sub_centroids_inds']
self.__dict__.update(state)
def create_subpitch(self, n_grid_cells_x):
# break the pitch down into a grid
n_grid_cells_y = np.int(np.ceil((n_grid_cells_x + 1) * self.__dimension[1] / self.__dimension[0]))
# These are the extremes of each grid cell
xgrid = np.linspace(-self.__dimension[0] / 2., self.__dimension[0] / 2., n_grid_cells_x + 1)
ygrid = np.linspace(-self.__dimension[1] / 2., self.__dimension[1] / 2., n_grid_cells_y + 1)
self.__n_grid_cells_y = np.int(ygrid.shape[0] - 1)
subpitch = []
# navigate the grid to create subpitches
for i in range(xgrid.shape[0] - 1):
for j in range(ygrid.shape[0] - 1):
# Coordinate of this subpitch
coords = [(xgrid[i], ygrid[j]), (xgrid[i + 1], ygrid[j]),
(xgrid[i + 1], ygrid[j + 1]), (xgrid[i], ygrid[j + 1])]
subpitch.append(SubPitch(coords))
self.__subpitch = gpd.GeoSeries(subpitch)
# Create centroids as well
self.__sub_centroids = self.__subpitch.apply(lambda x: x.centroid)
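        # Example (sketch): with the default 106 m x 68 m pitch, n_grid_cells_x=16 yields a
        # 16 x 11 grid of sub-pitches of roughly 6.6 m x 6.2 m each.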
@property
def dimension(self):
return self.__dimension
@dimension.setter
def dimension(self, dimension):
self.__dimension = dimension
if self.__n_grid_cells_x is not None:
self.create_subpitch(self.__n_grid_cells_x)
@property
def n_grid_cells_x(self):
return self.__n_grid_cells_x
@n_grid_cells_x.setter
def n_grid_cells_x(self, n_grid_cells_x):
if n_grid_cells_x is None:
self.__subpitch = None
self.__n_grid_cells_x = None
self.__sub_centroids = None
else:
self.__n_grid_cells_x = np.int(n_grid_cells_x)
self.create_subpitch(self.__n_grid_cells_x)
@property
def n_grid_cells_y(self):
return self.__n_grid_cells_y
@n_grid_cells_y.setter
def n_grid_cells_y(self, n_grid_cells_y):
raise NotImplementedError("At the moment, the only way to change the subpitch grid is to change n_grid_cells_x")
#
# @property
# def sub_pitch(self):
# return(self.__subpitch)
@property
def sub_pitch_area(self):
return(np.round(self.__subpitch.iloc[0].area, 3))
def plot(self, field_color='green', linewidth=2, markersize=20, fig_ax=None, grid=False, grid_alpha=1,
grid_col='black'):
"""
Plots a soccer pitch. Most of this code comes from <NAME>
Parameters
-----------
field_color: color of field. options are {'green','white'}
linewidth : width of lines. default = 2
markersize : size of markers (e.g. penalty spot, centre spot, posts). default = 20
fig_ax: figure, axis from matplotlib. default = None
grid: Boolean. Should the subpitch grid be plotted? Plots nothing if the pitch has no subpitch.
grid_alpha: float in [0,1]. What alpha should the grid have
grid_col: Color to be passed to matplotlib
Returns
-----------
        fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
"""
if fig_ax is None:
fig, ax = plt.subplots(figsize=(12, 8)) # create a figure
else:
fig, ax = fig_ax
# decide what color we want the field to be. Default is green, but can also choose white
if field_color == 'green':
ax.set_facecolor('mediumseagreen')
lc = 'whitesmoke' # line color
pc = 'w' # 'spot' colors
elif field_color == 'white':
lc = 'k'
pc = 'k'
# ALL DIMENSIONS IN m
        border_dimen = (3, 3)  # include a border around the field of width 3m
half_pitch_length = self.__dimension[0] / 2. # length of half pitch
half_pitch_width = self.__dimension[1] / 2. # width of half pitch
signs = [-1, 1]
# Soccer field dimensions typically defined in yards, so we need to convert to meters
goal_line_width = 8 * meters_per_yard
box_width = 20 * meters_per_yard
box_length = 6 * meters_per_yard
area_width = 44 * meters_per_yard
area_length = 18 * meters_per_yard
penalty_spot = 12 * meters_per_yard
corner_radius = 1 * meters_per_yard
D_length = 8 * meters_per_yard
D_radius = 10 * meters_per_yard
D_pos = 12 * meters_per_yard
centre_circle_radius = 10 * meters_per_yard
# plot half way line # center circle
ax.plot([0, 0], [-half_pitch_width, half_pitch_width], lc, linewidth=linewidth)
ax.scatter(0.0, 0.0, marker='o', facecolor=lc, linewidth=0, s=markersize)
y = np.linspace(-1, 1, 50) * centre_circle_radius
x = np.sqrt(centre_circle_radius ** 2 - y ** 2)
ax.plot(x, y, lc, linewidth=linewidth)
ax.plot(-x, y, lc, linewidth=linewidth)
        for s in signs:  # plots each line separately
# plot pitch boundary
ax.plot([-half_pitch_length, half_pitch_length], [s * half_pitch_width, s * half_pitch_width], lc,
linewidth=linewidth)
ax.plot([s * half_pitch_length, s * half_pitch_length], [-half_pitch_width, half_pitch_width], lc,
linewidth=linewidth)
# goal posts & line
ax.plot([s * half_pitch_length, s * half_pitch_length], [-goal_line_width / 2., goal_line_width / 2.],
pc + 's',
markersize=6 * markersize / 20., linewidth=linewidth)
# 6 yard box
ax.plot([s * half_pitch_length, s * half_pitch_length - s * box_length], [box_width / 2., box_width / 2.],
lc,
linewidth=linewidth)
ax.plot([s * half_pitch_length, s * half_pitch_length - s * box_length], [-box_width / 2., -box_width / 2.],
lc,
linewidth=linewidth)
ax.plot([s * half_pitch_length - s * box_length, s * half_pitch_length - s * box_length],
[-box_width / 2., box_width / 2.], lc, linewidth=linewidth)
# penalty area
ax.plot([s * half_pitch_length, s * half_pitch_length - s * area_length],
[area_width / 2., area_width / 2.],
lc, linewidth=linewidth)
ax.plot([s * half_pitch_length, s * half_pitch_length - s * area_length],
[-area_width / 2., -area_width / 2.],
lc, linewidth=linewidth)
ax.plot([s * half_pitch_length - s * area_length, s * half_pitch_length - s * area_length],
[-area_width / 2., area_width / 2.], lc, linewidth=linewidth)
# penalty spot
ax.scatter(s * half_pitch_length - s * penalty_spot, 0.0, marker='o', facecolor=lc, linewidth=0,
s=markersize)
# corner flags
y = np.linspace(0, 1, 50) * corner_radius
x = np.sqrt(corner_radius ** 2 - y ** 2)
ax.plot(s * half_pitch_length - s * x, -half_pitch_width + y, lc, linewidth=linewidth)
ax.plot(s * half_pitch_length - s * x, half_pitch_width - y, lc, linewidth=linewidth)
# draw the D
y = np.linspace(-1, 1, 50) * D_length # D_length is the chord of the circle that defines the D
x = np.sqrt(D_radius ** 2 - y ** 2) + D_pos
ax.plot(s * half_pitch_length - s * x, y, lc, linewidth=linewidth)
# remove axis labels and ticks
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# set axis limits
xmax = self.__dimension[0] / 2. + border_dimen[0]
ymax = self.__dimension[1] / 2. + border_dimen[1]
ax.set_xlim([-xmax, xmax])
ax.set_ylim([-ymax, ymax])
ax.set_axisbelow(True)
ax.set_aspect('equal')
if self.__n_grid_cells_x is not None and grid:
self.__subpitch.plot(ax=ax, facecolor="none", edgecolor=grid_col, alpha=grid_alpha)
return fig, ax
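# A minimal usage sketch for Pitch.plot, kept as a never-called helper so importing this
# module stays side-effect free. The constructor call is an assumption for illustration;
# only the plot() keyword arguments are taken from the method above.
def _example_plot_pitch():
    pitch = Pitch((106.0, 68.0), n_grid_cells_x=16)  # hypothetical constructor signature
    fig, ax = pitch.plot(grid=True, grid_alpha=0.3)  # draw the pitch with its sub-pitch grid
    return fig, ax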
class Match:
'''
Contain all information about one match. It needs two iterables containing home and away Players, a Ball object, a
Pitch object, an event dataset. Hertz shows the frequency (per second) of the tracking data. Other attributes will
be specified in the future.
:param home_tracking:
:param away_tracking:
:param ball:
:param events: A DataFrame of events data. Its first column should be a frame reference -- even if no tracking data
is passed
:param pitch:
:param halves:
:param home_name:
:param away_name:
:param hertz:
'''
def __init__(self, home_tracking, away_tracking, ball, events, pitch, halves, home_name='home', away_name='away',
hertz=25, colors=('red', 'blue'), calculate_possession=False, possession_kwords={}):
self.__player_ids = [p.id for p in home_tracking] + [p.id for p in away_tracking]
self.__home_tracking = {p.id: p for p in home_tracking}
self.__pitch = pitch
self.__away_tracking = {p.id: p for p in away_tracking}
self.__ball = ball
self.__events = events
self.__hertz = hertz
self.__home_name = home_name
self.__away_name = away_name
self.__pitch_dimension = pitch.dimension
self.__team_names = (home_name, away_name)
self.__colors = {team:color for team, color in zip(self.__team_names, colors)}
self.__possession_pars = possession_kwords
# First frame new half
self.__halves_frame = halves
if not calculate_possession:
self.__possession = None
else:
self.assign_possesion(**possession_kwords)
# Correct speed at the end/start of an half
# for player in self.__home_tracking.values():
# print(f"Correcting Velocity for Player {player.id}.")
# player.correct_speed(halves, hertz)
# for player in self.away_tracking.values():
# print(f"Correcting Velocity for Player {player.id}.")
# player.correct_speed(halves, hertz)
# print(f"Correcting Velocity for Ball.")
# ball.correct_speed(halves, hertz)
# TODO This will only work if we have tracking data for the players
self.get_GKs()
# Define state methods for pickling
def __getstate__(self):
state = copy(self.__dict__)
# We need to makes sure we can pickle the players, the ball, the pitch and the events
# Start with the ball
state['_Match__ball'] = (state['_Match__ball'].__class__, state['_Match__ball'].__getstate__())
# Continue with player
state['_Match__away_tracking'] = {p_id: (p_obj.__class__, p_obj.__getstate__())
for p_id, p_obj in state['_Match__away_tracking'].items()}
state['_Match__home_tracking'] = {p_id: (p_obj.__class__, p_obj.__getstate__())
for p_id, p_obj in state['_Match__home_tracking'].items()}
# Then the pitch
state['_Match__pitch'] = (state['_Match__pitch'].__class__, state['_Match__pitch'].__getstate__())
# Finally, the events
# Check which columns are geometries
state['_Match__event_geometry'] = {}
events = self.__events.copy()
state['_Match__event_orders'] = events.columns.values.copy()
for col, dtype in zip(events.columns, events.dtypes):
# Save the geometry columns as wkt
if dtype == 'object' and isinstance(events.loc[~events[col].isna(), col].iloc[0], Point):
# Pandas have a strange behavior with fillna or simple assignment
g_col = events[col].apply(lambda x: Point([-999, -999]) if pd.isna(x) else x)
state['_Match__event_geometry'][col] = MultiPoint(g_col.to_list()).wkt
events = events.drop(columns=[col])
state['_Match__events'] = events
return state
def __setstate__(self, state):
# We need to rebuild the objects containing geometries
# Start with the ball
cls = state['_Match__ball'][0]
ball = cls.__new__(cls)
ball.__setstate__(state['_Match__ball'][1])
state['_Match__ball'] = ball
# Continue with players
away_tracking = {}
home_tracking = {}
for p_id, obj in state['_Match__away_tracking'].items():
cls = obj[0]
p = cls.__new__(cls)
p.__setstate__(obj[1])
away_tracking[p_id] = p
state['_Match__away_tracking'] = away_tracking
for p_id, obj in state['_Match__home_tracking'].items():
cls = obj[0]
p = cls.__new__(cls)
p.__setstate__(obj[1])
home_tracking[p_id] = p
state['_Match__home_tracking'] = home_tracking
# Then the pitch
cls = state['_Match__pitch'][0]
pitch = cls.__new__(cls)
pitch.__setstate__(state['_Match__pitch'][1])
state['_Match__pitch'] = pitch
# Now the events
for col, geoms in state['_Match__event_geometry'].items():
# Make a series
geoms = pd.Series([Point(p) for p in wkt.loads(geoms).geoms])
geoms.index = state['_Match__events'].index
# Get the Nans back
geoms[geoms == Point([-999., -999.])] = np.nan
state['_Match__events'][col] = geoms
state['_Match__events'] = state['_Match__events'][state['_Match__event_orders']]
del state['_Match__event_orders'], state['_Match__event_geometry']
self.__dict__.update(state)
def save(self, save_path, protocol=pkl.DEFAULT_PROTOCOL):
with open(save_path, 'wb') as fl:
pkl.dump(self, fl, protocol=protocol)
@property
def pitch(self):
return self.__pitch
@property
def player_ids(self):
return self.__player_ids
@property
def home_tracking(self):
return self.__home_tracking
@property
def away_tracking(self):
return self.__away_tracking
@property
def ball(self):
return self.__ball
@property
def events(self):
return self.__events
@property
def hertz(self):
return self.__hertz
@property
def home_team(self):
return (self.__home_name)
@property
def away_team(self):
return (self.__away_name)
@property
def teams(self):
return (self.__team_names)
@property
def halves(self):
return (self.__halves_frame)
@property
def team_colors(self):
return(self.__colors)
@team_colors.setter
def team_colors(self, colors):
'''
Change the colors of the team
        :param colors: an iterable of length 2 containing the colors, or a dictionary keyed by team name. The colors
            must be specified in a way that matplotlib understands.
'''
if not isinstance(colors, dict):
self.__colors = {team: color for team, color in zip(self.__team_names, colors)}
else:
ks = set(k for k in colors.keys())
if ks == set(team for team in self.__team_names):
self.__colors = colors
else:
                raise KeyError("The keys of the dictionary must match the team names of the match object.")
@teams.setter
def teams(self, team_names):
'''
        Change the team names. The names must be ordered, first home then away
        :param team_names: A 2-element iterable containing strings
'''
# If we change the team names, we want to change those in every player as well
        self.__team_names = tuple(team_names)
for player in self.__home_tracking.values():
player.team = self.__team_names[0]
for player in self.__away_tracking.values():
player.team = self.__team_names[1]
@property
def GKs(self):
return({k:p.id for k, p in self.__GKs.items()})
@property
def attack_directions(self):
'''
:return: Sign of the attacking direction
'''
return(self.__attack_dir)
def invert_attack_directions(self):
'''
Invert the attacking directions in the Match object
'''
for team in [self.__home_tracking, self.__away_tracking]:
for p_id, p in team.items():
print(f"Inverting: {p_id}")
p._ObejctOnPitch__positions = p._ObejctOnPitch__positions.geometry.affine_transform([-1, 0, 0, -1, 0, 0])
# Velocity is registered in floats, so we can simply multiply by -1
p._ObejctOnPitch__velocity[['x','y']] = p._ObejctOnPitch__velocity[['x', 'y']] * (-1)
print("Inverting ball")
self.__ball._ObejctOnPitch__positions = \
self.__ball._ObejctOnPitch__positions.geometry.affine_transform([-1, 0, 0, -1, 0, 0])
self.__ball._ObejctOnPitch__velocity[['x', 'y']] = self.__ball._ObejctOnPitch__velocity[['x', 'y']] * (-1)
for k in self.__attack_dir.keys():
self.__attack_dir[k] = self.__attack_dir[k] * (-1)
@property
def possession(self):
'''
Possession spells. Calculate them if not already calculated
'''
if self.__possession is None:
self.assign_possesion(**self.__possession_pars)
return self.__possession
@property
def possession_parameters(self):
'''
Parameters used in the calculation of possession
'''
return self.__possession_pars
@possession_parameters.setter
def possession_parameters(self, new_pars):
'''
Parameters used in the calculation of possession. May contain a partial change (no need to set all parameters
all the time)
'''
self.__possession_pars.update(new_pars)
def __getitem__(self, item):
'''
Quick ways to get tracking data or events
'''
if item in self.__player_ids:
try:
return self.__home_tracking[item]
except KeyError:
return self.__away_tracking[item]
elif item == 'ball':
return self.__ball
elif item == 'events':
return self.__events
raise KeyError(f"{item} is not `ball`, `events` or any of the players' id.")
def get_GKs(self):
'''
This function infers which player is the GK based on position at kick-off. It also calculates the attack sides.
'''
attack_sides = {team:0 for team in self.__team_names}
GKs = {team:np.nan for team in self.__team_names}
koff_frame = self.__events.iloc[0].name
# We infer attack direction based on the kickoff positions
for team_name, team in zip(self.__team_names, [self.__home_tracking, self.__away_tracking]):
_ = [p for p in team.values() if isinstance(p.positions.loc[koff_frame], Point)]
att_side = np.array([p.positions[koff_frame].x for p in _])
            # Players sit in their own half at kick-off, so the attack direction is the
            # opposite sign of their mean x position
            attack_sides[team_name] = -np.sign(att_side.mean())
gk_pos = (attack_sides[team_name] * att_side).argmin()
GKs[team_name] = _[gk_pos]
for i, p in enumerate(_):
if i != gk_pos:
p.GK = False
else:
p.GK = True
self.__GKs = GKs
self.__attack_dir = attack_sides
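    # Worked example of the sign logic above (comment only): if the home players' mean x
    # position at kick-off is -20 (they line up in the left half), then
    # attack_sides['home'] = -sign(-20) = +1, i.e. home attacks towards positive x.
    # Multiplying each x by that attack sign makes the most negative product belong to the
    # player deepest in their own half, so argmin picks out the goalkeeper.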
def plot_frame(self, frame, figax=None, include_player_velocities=False, PlayerMarkerSize=10, PlayerAlpha=0.7,
annotate=False):
""" plot_frame( hometeam, awayteam )
Plots a frame of Metrica tracking data (player positions and the ball) on a football pitch. All distances should be
in meters.
Parameters
-----------
hometeam: row (i.e. instant) of the home team tracking data frame
awayteam: row of the away team tracking data frame
fig,ax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an
existing figure, or None (the default) to generate a new pitch plot,
team_colors: Tuple containing the team colors of the home & away team. Default is 'r' (red, home team) and 'b'
(blue away team)
field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
include_player_velocities: Boolean variable that determines whether player velocities are also plotted
(26500 quivers). Default is False
PlayerMarkerSize: size of the individual player marlers. Default is 10
PlayerAlpha: alpha (transparency) of player markers. Defaault is 0.7
annotate: Boolean variable that determines with player jersey numbers are added to the plot (default is False)
Returns
-----------
fig,ax : figure and aixs objects (so that other data can be plotted onto the pitch)
"""
if figax is None: # create new pitch
fig, ax = self.pitch.plot()
else: # overlay on a previously generated pitch
fig, ax = figax # unpack tuple
# plot home & away teams in order
relevant_players = {}
for team_name, team in zip(self.__team_names, [self.__home_tracking, self.__away_tracking]):
print(f"TEAM: {team_name}")
color = self.__colors[team_name]
_ = [p for p in team.values() if isinstance(p.positions.loc[frame], Point)]
relevant_players[team_name] = _
# X and Y position for the home/away team
Xs = [p.positions.loc[frame].x for p in _]
Ys = [p.positions.loc[frame].y for p in _]
# plot player positions
ax.plot(Xs, Ys, color=color, marker='o', markersize=PlayerMarkerSize, alpha=PlayerAlpha, linestyle="")
if include_player_velocities:
vx = [p.velocity.loc[frame, 'x'] for p in
relevant_players[team_name]] # X component of the speed vector
vy = [p.velocity.loc[frame, 'y'] for p in
relevant_players[team_name]] # Y component of the speed vector
ax.quiver(Xs, Ys, vx, vy, color=color,
scale_units='inches', scale=10., width=0.0015, headlength=5, headwidth=3,
alpha=PlayerAlpha)
if annotate:
for i, player in enumerate(relevant_players[team_name]):
ax.text(Xs[i] + 0.5, Ys[i] + 0.5, player.id, fontsize=10,
color=color)
# plot ball
if isinstance(self.__ball.positions.loc[frame], Point):
ax.plot(self.__ball.positions.loc[frame].x, self.__ball.positions.loc[frame].y, markersize=6, marker='o',
alpha=1.0, linewidth=0, color='white', linestyle="")
return fig, ax
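    # Minimal usage sketch for plot_frame (comment only; `match` is an assumed, already
    # constructed Match instance and 1000 an arbitrary frame index):
    #
    #   fig, ax = match.plot_frame(1000, include_player_velocities=True, annotate=True)
    #   fig.savefig("frame_1000.png", dpi=150)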
def save_match_clip(self, sequence, path, figax=None,
field_dimen=(106.0, 68.0), include_player_velocities=False,
PlayerMarkerSize=10, PlayerAlpha=0.7, annotate=False, frame_timing=False):
""" save_match_clip( hometeam, awayteam, fpath )
Generates a movie from Metrica tracking data, saving it in the 'fpath' directory with name 'fname'
Parameters
-----------
path: path to the output file
fname: movie filename. Default is 'clip_test.mp4'
fig,ax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an existing figure, or None (the default) to generate a new pitch plot,
frames_per_second: frames per second to assume when generating the movie. Default is 25.
team_colors: Tuple containing the team colors of the home & away team. Default is 'r' (red, home team) and 'b' (blue away team)
field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
include_player_velocities: Boolean variable that determines whether player velocities are also plotted (as quivers). Default is False
PlayerMarkerSize: size of the individual player marlers. Default is 10
PlayerAlpha: alpha (transparency) of player markers. Defaault is 0.7
Returns
-----------
fig,ax : figure and aixs objects (so that other data can be plotted onto the pitch)
"""
# check that indices match first
# assert np.all(hometeam.index == awayteam.index), "Home and away team Dataframe indices must be the same"
# in which case use home team index
# index = self.__home_tracking[0].positions.index
# Set figure and movie settings
FFMpegWriter = animation.writers['ffmpeg']
metadata = dict(title='Tracking Data', artist='Matplotlib', comment='Metrica tracking data clip')
writer = FFMpegWriter(fps=self.__hertz, metadata=metadata)
if path[-4:] != '.mp4': path += '.mp4'
# fname = fpath + '/' + fname + '.mp4' # path and filename
# create football pitch
if figax is None:
fig, ax = self.pitch.plot()
else:
fig, ax = figax
fig.set_tight_layout(True)
# Generate movie
print("Generating movie...", end='')
with writer.saving(fig, path, 100):
for frame in sequence:
figobjs = [] # this is used to collect up all the axis objects so that they can be deleted after each iteration
relevant_players = {}
for team_name, team in zip(self.__team_names, [self.__home_tracking, self.__away_tracking]):
color = self.__colors[team_name]
# Get players on the pitch
_ = [p for p in team.values() if isinstance(p.positions.loc[frame], Point)]
relevant_players[team_name] = _
Xs = [p.positions.loc[frame].x for p in _]
Ys = [p.positions.loc[frame].y for p in _]
# Plot players position
objs, = ax.plot(Xs, Ys, color=color, marker='o', markersize=PlayerMarkerSize, alpha=PlayerAlpha,
linestyle="")
figobjs.append(objs)
if include_player_velocities:
vx = [p.velocity.loc[frame, 'x'] for p in
relevant_players[team_name]] # X component of the speed vector
vy = [p.velocity.loc[frame, 'y'] for p in
relevant_players[team_name]] # Y component of the speed vector
# vy_columns = -1 * np.array(vy_columns)
objs = ax.quiver(Xs, Ys, vx, vy, color=color,
scale_units='inches', scale=10., width=0.0015, headlength=5, headwidth=3,
alpha=PlayerAlpha)
figobjs.append(objs)
if annotate:
for i, player in enumerate(relevant_players[team_name]):
objs = ax.text(Xs[i] + 0.5, Ys[i] + 0.5, player.id, fontsize=10, color=color)
figobjs.append(objs)
# plot ball
if isinstance(self.__ball.positions.loc[frame], Point):
objs, = ax.plot(self.__ball.positions.loc[frame].x, self.__ball.positions.loc[frame].y, marker='o',
markersize=6, alpha=1.0, linewidth=0, color='white', linestyle="")
# objs, = ax.plot(team['ball_x'], team['ball_y'], 'ko', MarkerSize=6, alpha=1.0, LineWidth=0)
figobjs.append(objs)
# include time reference at the top
if not frame_timing:
                    frame_minute = int(frame / (60 * self.__hertz))
                    frame_second = int(np.floor((frame / (60 * self.__hertz) - frame_minute) * 60.))
                    timestring = f"{frame_minute}:{frame_second:02d}"
else:
timestring = f"{frame}"
objs = ax.text(-2.5, field_dimen[1] / 2. + 1., timestring, fontsize=14)
figobjs.append(objs)
writer.grab_frame()
                # Delete all axis objects (other than pitch lines) in preparation for the next frame
for figobj in figobjs:
figobj.remove()
print("done")
plt.clf()
plt.close(fig)
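    # Minimal usage sketch for save_match_clip (comment only; requires ffmpeg to be available
    # to matplotlib, and `match` is an assumed, already constructed Match instance):
    #
    #   sequence = range(1000, 1000 + 10 * match.hertz)   # a ten second clip
    #   match.save_match_clip(sequence, "clip_test", include_player_velocities=True)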
def plot_events(self, event_ids, figax=None, indicators=['Marker', 'Arrow'], marker_style='o', alpha=0.5,
annotate=False):
""" plot_events( events )
Plots Metrica event positions on a football pitch. event data can be a single or several rows of a data frame.
All distances should be in meters.
Parameters
-----------
event_ids: index (or indices) for the event in the event dataframe of the match object
fig,ax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an
existing figure, or None (the default) to generate a new pitch plot,
        indicators: List containing choices on how to plot the event. 'Marker' places a marker at the 'Start'
        location of the event; 'Arrow' draws an arrow from the start to end locations. Can choose one or both.
        marker_style: Marker type used to indicate the event position. Default is 'o' (filled circle).
alpha: alpha of event marker. Default is 0.5
annotate: Boolean determining whether text annotation from event data 'Type' and 'From' fields is shown on plot.
Default is False.
Returns
-----------
        fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
"""
if figax is None: # create new pitch
fig, ax = self.__pitch.plot()
else: # overlay on a previously generated pitch
fig, ax = figax
events = self.__events.loc[event_ids, :]
for i, row in events.iterrows():
color = self.__colors[row['Team'].casefold()]
if not pd.isna(row['Start']):
if 'Marker' in indicators:
ax.plot(row['Start'].x, row['Start'].y, color=color, marker=marker_style, alpha=alpha)
if 'Arrow' in indicators:
if not pd.isna(row['End']):
ax.annotate("", xy=row['End'].xy, xytext=row['Start'].xy,
alpha=alpha,
arrowprops=dict(alpha=alpha, width=0.5, headlength=4.0, headwidth=4.0, color=color),
annotation_clip=False)
if annotate:
text_string = row['event'] + ': ' + row['From']
ax.text(row['Start'].x, row['Start'].y, text_string, fontsize=10, color=color)
return fig, ax
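    # Minimal usage sketch for plot_events (comment only; assumes the events DataFrame has the
    # 'Team', 'Start', 'End', 'event' and 'From' columns used above, and that the ids passed
    # are valid index labels of match.events):
    #
    #   shot_ids = match.events[match.events['event'] == 'shot'].index
    #   fig, ax = match.plot_events(shot_ids, indicators=['Marker', 'Arrow'], annotate=True)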
def plot_pitchcontrol_for_event(self, event_id, PPCF, alpha=0.7,
include_player_velocities=True, annotate=False):
""" plot_pitchcontrol_for_event( event_id, events, tracking_home, tracking_away, PPCF )
Plots the pitch control surface at the instant of the event given by the event_id. Player and ball positions are overlaid.
Parameters
-----------
event_id: Index (not row) of the event that describes the instant at which the pitch control surface should be calculated
events: Dataframe containing the event data
tracking_home: (entire) tracking DataFrame for the Home team
tracking_away: (entire) tracking DataFrame for the Away team
PPCF: Pitch control surface (dimen (n_grid_cells_x,n_grid_cells_y) ) containing pitch control probability for the attcking team (as returned by the generate_pitch_control_for_event in Metrica_PitchControl)
alpha: alpha (transparency) of player markers. Default is 0.7
include_player_velocities: Boolean variable that determines whether player velocities are also plotted (as quivers). Default is False
annotate: Boolean variable that determines with player jersey numbers are added to the plot (default is False)
field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
NB: this function no longer requires xgrid and ygrid as an input
Returrns
-----------
fig,ax : figure and aixs objects (so that other data can be plotted onto the pitch)
"""
# pick a pass at which to generate the pitch control surface
pass_frame = self.__events.loc[event_id, 'Start Frame']
pass_team = self.__events.loc[event_id, 'Team']
# plot frame and event
fig, ax = self.__pitch.plot(field_color='white')
self.plot_frame(pass_frame, figax=(fig, ax), PlayerAlpha=alpha,
include_player_velocities=include_player_velocities, annotate=annotate)
self.plot_events(self.__events.loc[event_id:event_id], figax=(fig, ax), indicators=['Marker', 'Arrow'],
annotate=False, alpha=1)
# plot pitch control surface
if pass_team == 'Home':
cmap = 'bwr'
else:
cmap = 'bwr_r'
ax.imshow(np.flipud(PPCF),
extent=(-self.__pitch_dimension[0] / 2., self.__pitch_dimension[0] / 2.,
-self.__pitch_dimension[1] / 2., self.__pitch_dimension[1] / 2.), interpolation='spline36',
vmin=0.0, vmax=1.0, cmap=cmap, alpha=0.5)
return fig, ax
def assign_possesion(self, sd=0.4, modify_event=True, min_speed_passage=0.12, max_recaliber=1.5,
filter_spells=True, filter_tol=4):
'''
        A simple function to find possession spells during the game based on the positions of the ball and the players.
        It essentially selects the closest player to the ball as the one in possession, but also filters out dead balls and
        passes. It uses a lot of information from the event data, essentially making the assumption that any possession spell
        must leave a mark in the events. If modify_event is True, we use the possession data to impute the frame of the
        events in the event data -- on average this improves the quality of the f24 Opta data.
:param self:
:param sd:
:param modify_event:
:param min_speed_passage:
:param max_recaliber:
:param filter_spells:
:param filter_tol:
:return:
'''
# [range(half - 5, half + 5) for half in self.__halves_frame]
voluntary_toss = ['pass', 'clearance', 'short free-kick', 'crossed free-kick', 'throw-in', 'other free-kick',
'cross', 'shot', 'crossed corner', 'free-kick shot', 'keeper punch', 'goal kick']
if filter_tol <1: filter_tol = 1
events = self.__events.copy()
events['recalibrated'] = False
events['frame_safe'] = events['frame'].copy()
# Useful for re-calibrating the events' frame
distances = [[], []]
ids = [[], []]
# for i, team in enumerate([match._Match__home_tracking, match._Match__away_tracking]):
# print(f"Calculating possession for team {[match._Match__home_name, match._Match__away_name][i]}")
# for p_id, p in team.items():
# print(f"Calculating possession for player {p_id}")
# ids[i].append(p_id)
# temp = pd.concat([p.positions, match._Match__ball.positions], axis=1)
# temp.columns = ['player', 'ball']
# temp = temp.loc[(~pd.isna(temp['player'])) & (~pd.isna(temp['ball']))]
# distances[i].append(temp['ball'].distance(temp['player']))
# Calculate the distance of every player from the ball at every frame. Takes time
for i, team in enumerate([self.__home_tracking, self.__away_tracking]):
print(f"Calculating possession for team {[self.__home_name, self.__away_name][i]}")
for p_id, p in team.items():
print(f"Calculating possession for player {p_id}")
ids[i].append(p_id)
temp = pd.concat([p.positions, self.__ball.positions], axis=1)
temp.columns = ['player', 'ball']
                temp = temp.loc[(~pd.isna(temp['player'])) & (~pd.isna(temp['ball']))]
import os
import sys
import math
import pandas as pd
import numpy as np
from sklearn.datasets import make_classification
from keras import backend as K
from keras import initializers, layers
from keras.layers import Input, Dense  # explicit imports for layers used below
from keras.models import Model
from keras.utils import to_categorical
from keras.constraints import non_neg, max_norm
from keras.initializers import Zeros
from keras.constraints import Constraint
from keras import regularizers
import tensorflow as tf
from decision_tree import *
from datetime import datetime
time_cb = TimingCallback()
X, y_ = make_classification() # may want to increase complexity here
y = to_categorical(y_)
# make sure you do a test validation split!
tree = Tree() # this keeps the state of the current decision tree...
input_dim = 20
dim_size = 20
nepochs = 5 # we use nepochs=20 in paper
num_class = 2
num_trees = 5
num_rounds = 3
save_dir = "temp"
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
def gen_states(tree, tree_list=[0], target_idx=None, return_tree_list=False):
def size_0(dct):
for key, val in dct.items():
if len(val) > 0:
return False
return True
tree_index = max(tree_list)
if target_idx is None:
curr_list = [tree_index+1, tree_index+2, tree_index+3]
else:
curr_list = [tree_index+1, target_idx, tree_index+2]
tree_list.extend(curr_list)
d0, s0 = tree.prune()
d1 = tree.tree.copy()
d2, s2 = tree.graft()
if size_0(d0):
# reset
d0 = Tree().tree.copy()
state_info = {'prune': (d0, curr_list[0]),
'base': (d1, curr_list[1]),
'graft': (d2, curr_list[2]),
'state': {
'prune': s0, 'graft': s2
}}
if return_tree_list:
return state_info, tree_list, curr_list
else:
return state_info
def outputshape(input_shape):
return [(input_shape[0], input_shape[1]) for _ in range(input_shape[2])]
def normalise_pred(x):
x = tf.stack(x)
x = tf.transpose(x, [1, 0, 2])
return x
def normalise_pred_shape(input_shape):
shape = list(input_shape[0])
num_trees = len(input_shape)
return tuple([shape[0], num_trees, shape[1]])
def softmax_tau(proba, tau=0.1):
"""
    This is a softmax which goes towards one-hot encoding over time.
We want to decay tau from 1.0 to 0.1 roughly
"""
from scipy.special import logit, expit
out = expit(logit(proba)/tau)
return out/np.sum(out)
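# A small worked example for softmax_tau, kept as a never-called helper so it has no side
# effects at import time: for proba = [0.6, 0.4], tau = 1.0 returns the distribution
# unchanged, while tau = 0.1 sharpens it to roughly [0.98, 0.02], i.e. close to one-hot.
def _example_softmax_tau():
    proba = np.array([0.6, 0.4])
    return softmax_tau(proba, tau=1.0), softmax_tau(proba, tau=0.1)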
def get_layer_weights(model, name='hwy', sample=False, tau=1.0):
out = K.eval([x for x in model.layers if x.name == name][0].weights[0]).flatten()
return normalise_weights(out, sample, tau)
def normalise_weights(out, sample=False, tau=1.0):
out = np.abs(out)
out = out/np.sum(out)
if sample and tau >= 1.0:
draw = np.random.choice(range(out.shape[0]), 1, p=out)
return draw[0]
elif sample:
draw = np.random.choice(range(out.shape[0]), 1, p=softmax_tau(out, tau))
return draw[0]
elif tau >= 1.0:
return out
else:
return softmax_tau(out, tau)
def calculate_routes(adj_list=None):
"""
    Calculates routes given a provided adjacency list,
    assuming that the root node is always 0.
    Also assumes this is a binary tree.
Test cases:
{0:[1, 2], 1:[], 2:[]} --> [(0, 0), (1, 0),
(0, 0), (1, 1),
(0, 1), (2, 0),
(0, 1), (2, 1)]
{0:[1], 1:[2], 2:[]} --> [(0, 0), (1, 0), (2, 0),
(0, 0), (1, 0), (2, 1),
(0, 0), (1, 1),
(0, 1)]
calculate_routes({0:[1,2], 1:[], 2:[]})
calculate_routes({0:[1], 1:[2], 2:[]})
"""
if adj_list is None:
raise Exception("Adj_list cannot be none")
def get_next(path):
next_paths = adj_list[path[-1]]
if len(next_paths) > 0:
for p in next_paths:
get_next(path + [p])
else:
all_paths.append(path)
all_paths = []
get_next([0])
# convert paths to indices...
path_indx = []
for path in all_paths:
cur_path = []
for cur_node, nxt_node in zip(path, path[1:]+[None]):
# print(cur_node, nxt_node)
pos_dir = np.array(sorted(adj_list[cur_node]))
pos_idx = np.argwhere(pos_dir==nxt_node).flatten().tolist()
if len(pos_idx) > 0 and len(pos_dir) == 2: # i.e. has 2 children
cur_path.append((cur_node, pos_idx[0]))
elif len(pos_idx) > 0 and len(pos_dir) == 1: # i.e. has 1 child
path_indx.append(cur_path + [(cur_node, 1)]) # then it will have a leaf!
cur_path.append((cur_node, pos_idx[0]))
elif nxt_node is not None:
cur_path.append((cur_node, pos_dir.shape[0]))
else:
path_indx.append(cur_path + [(cur_node, 0)])
path_indx.append(cur_path + [(cur_node, 1)])
return path_indx
def build_tree(main_input, tree, tree_list, indx, tree_number=0):
"""
Builds a single decision tree, returns all the specs needed to preserve tree state...
"""
tree_state, tree_list, curr_list = gen_states(tree, tree_list[:], indx, True)
route0 = calculate_routes(tree_state['prune'][0])
route1 = calculate_routes(tree_state['base'][0])
route2 = calculate_routes(tree_state['graft'][0])
nodes0 = list(tree_state['prune'][0].keys())
nodes1 = list(tree_state['base'][0].keys())
nodes2 = list(tree_state['graft'][0].keys())
all_nodes = list(set(nodes0 + nodes1 + nodes2))
tree_nodes_list = len(all_nodes)
route_name0 = "t{}_tree_route{}".format(tree_number, tree_state['prune'][1])
route_name1 = "t{}_tree_route{}".format(tree_number, tree_state['base'][1])
route_name2 = "t{}_tree_route{}".format(tree_number, tree_state['graft'][1])
pred_name0 = "t{}_pred_route{}".format(tree_number, tree_state['prune'][1])
pred_name1 = "t{}_pred_route{}".format(tree_number, tree_state['base'][1])
pred_name2 = "t{}_pred_route{}".format(tree_number, tree_state['graft'][1])
# create custom regularization weights based on the routes that it will be taking...
def l1_reg(weight_matrix, nodes=[nodes0, nodes1, nodes2]):
# weight matrix is shape (2, feats, nodes)
unweighted_reg = 0.02 * K.sum(K.abs(weight_matrix))
if len(nodes) == 0:
return unweighted_reg
else:
# determine weights by the routing logic...
base_weight = 0.01/len(nodes)
running_weight = 0.0
for nds in nodes:
normalizer = base_weight * (1.0/len(nds)) * (math.sqrt(len(nds))/math.sqrt(7))
for nd in nds:
running_weight += normalizer * K.sum(K.abs(weight_matrix[:, :, nd]))
return unweighted_reg-running_weight
tree_nodes = DecisionTreeNode(nodes=tree_nodes_list, regularizer=l1_reg)(main_input)
tree_r0 = DecisionTreeRouting(route=route0, name=route_name0)([main_input, tree_nodes])
tree_r1 = DecisionTreeRouting(route=route1, name=route_name1)([main_input, tree_nodes])
tree_r2 = DecisionTreeRouting(route=route2, name=route_name2)([main_input, tree_nodes])
leaf_layers0 = layers.Lambda(lambda x: [tf.squeeze(y) for y in tf.split(x, [1 for _ in range(K.int_shape(x)[2])], axis=2)], output_shape=outputshape)(tree_r0)
leaf_layers1 = layers.Lambda(lambda x: [tf.squeeze(y) for y in tf.split(x, [1 for _ in range(K.int_shape(x)[2])], axis=2)], output_shape=outputshape)(tree_r1)
leaf_layers2 = layers.Lambda(lambda x: [tf.squeeze(y) for y in tf.split(x, [1 for _ in range(K.int_shape(x)[2])], axis=2)], output_shape=outputshape)(tree_r2)
    # As part of this step, we need to generate all leaf nodes for all candidate trees, and identify which leaf nodes are shared and which ones are not.
def get_leafs(tree_info):
'''
retrieves the name of the leafs, which are:
name of the parent + name of the route (left or right)
        we add the current datetime so that they are retrained every run - it doesn't make sense for them to be updated in tandem
'''
tree = Tree(tree=tree_info)
leaves = tree.get_leaves()
parent_leaves = ["p{}_l{}_t{}_{}".format(tree.get_parent(idx), idx, tree_number, datetime.now().strftime("%H%M%S")) for idx in leaves]
return parent_leaves
leaf_names0 = get_leafs(tree_state['prune'][0])
leaf_names1 = get_leafs(tree_state['base'][0])
leaf_names2 = get_leafs(tree_state['graft'][0])
leaf_names_all = set(leaf_names0 + leaf_names1 + leaf_names2)
pred_layer = {nm:Dense(num_class, activation='softmax', name=nm) for nm in leaf_names_all}
pred_layer_tree0 = [pred_layer[nm](x) for nm, x in zip(leaf_names0, leaf_layers0)]
pred_layer_tree1 = [pred_layer[nm](x) for nm, x in zip(leaf_names1, leaf_layers1)]
pred_layer_tree2 = [pred_layer[nm](x) for nm, x in zip(leaf_names2, leaf_layers2)]
stack_pred0 = layers.Lambda(normalise_pred, output_shape=normalise_pred_shape)(pred_layer_tree0)
stack_pred1 = layers.Lambda(normalise_pred, output_shape=normalise_pred_shape)(pred_layer_tree1)
stack_pred2 = layers.Lambda(normalise_pred, output_shape=normalise_pred_shape)(pred_layer_tree2)
tree_d0 = DecisionPredRouting(route=route0)([stack_pred0, tree_nodes])
tree_d1 = DecisionPredRouting(route=route1)([stack_pred1, tree_nodes])
tree_d2 = DecisionPredRouting(route=route2)([stack_pred2, tree_nodes])
highway_layer = HighwayWeights(output_dim=3, name='hwy{}'.format(tree_number))([tree_d0, tree_d1, tree_d2])
return highway_layer, tree_state, tree_list, curr_list
tree_index = 0
forest = [Tree() for idx in range(num_trees)]
main_input = Input(shape=(dim_size,), name='main_input')
tree_listing = [build_tree(main_input, forest[idx], [0], None, idx) for idx in range(num_trees)]
#t0, tree_state0, tree_list0, curr_list0 = build_tree(main_input, forest[0], [0], None, 0)
#t1, tree_state1, tree_list1, curr_list1 = build_tree(main_input, forest[1], [0], None, 1)
#t2, tree_state2, tree_list2, curr_list2 = build_tree(main_input, forest[2], [0], None, 2)
#t3, tree_state3, tree_list3, curr_list3 = build_tree(main_input, forest[3], [0], None, 3)
#t4, tree_state4, tree_list4, curr_list4 = build_tree(main_input, forest[4], [0], None, 4)
def normalise_pred2(x):
x = tf.stack(x)
x = tf.transpose(x, [1, 0, 2])
cl = K.sum(x, axis=1)
cl = cl/tf.norm(cl, ord=1, axis=1, keepdims=True)
return cl
def normalise_pred_shape2(input_shape):
shape = list(input_shape[0])
return tuple([shape[0], num_class])
stack_pred = layers.Lambda(normalise_pred2, output_shape=normalise_pred_shape2)([tl[0] for tl in tree_listing])
model = Model(inputs=[main_input], outputs=[stack_pred])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
hist = model.fit(X, y, epochs=nepochs, verbose=0)
hist_df = pd.DataFrame(hist.history)
print(pd.DataFrame(hist.history).iloc[-1])
model.save_weights(os.path.join(save_dir, 'temp_model_s.h5'))
tau = 1.0
discount = 0.99
for iters in range(num_rounds):
print("\n\nCurrent Iter: {}".format(iters))
try:
tau = tau * discount
next_idx = [get_layer_weights(model, name='hwy{}'.format(idx), tau=tau, sample=True) for idx in range(num_trees)]
#next_idx0 = get_layer_weights(model, name='hwy0', tau=tau, sample=True)
#next_idx1 = get_layer_weights(model, name='hwy1', tau=tau, sample=True)
#next_idx2 = get_layer_weights(model, name='hwy2', tau=tau, sample=True)
#next_idx3 = get_layer_weights(model, name='hwy3', tau=tau, sample=True)
#next_idx4 = get_layer_weights(model, name='hwy4', tau=tau, sample=True)
actions = ['prune', 'base', 'graft']
#print("Next idx: {}, action: {}".format(curr_list[next_idx], actions[next_idx]))
#tree0 = Tree(tree=tree_state0[actions[next_idx0]][0])
#tree1 = Tree(tree=tree_state1[actions[next_idx1]][0])
#tree2 = Tree(tree=tree_state2[actions[next_idx2]][0])
#tree3 = Tree(tree=tree_state3[actions[next_idx3]][0])
#tree4 = Tree(tree=tree_state4[actions[next_idx4]][0])
forest = [Tree(tree=tree_listing[idx][1][actions[next_idx[idx]]][0]) for idx in range(num_trees)]
main_input = Input(shape=(dim_size,), name='main_input')
tree_listing = [build_tree(main_input, forest[idx], tree_listing[idx][2], tree_listing[idx][3][next_idx[idx]], idx) for idx in range(num_trees)]
#t0, tree_state0, tree_list0, curr_list0 = build_tree(main_input, tree0, tree_list0, curr_list0[next_idx0], 0)
#t1, tree_state1, tree_list1, curr_list1 = build_tree(main_input, tree1, tree_list1, curr_list1[next_idx1], 1)
#t2, tree_state2, tree_list2, curr_list2 = build_tree(main_input, tree2, tree_list2, curr_list2[next_idx2], 2)
#t3, tree_state3, tree_list3, curr_list3 = build_tree(main_input, tree3, tree_list3, curr_list3[next_idx3], 3)
#t4, tree_state4, tree_list4, curr_list4 = build_tree(main_input, tree4, tree_list4, curr_list4[next_idx4], 4)
stack_pred = layers.Lambda(normalise_pred2, output_shape=normalise_pred_shape2)([tl[0] for tl in tree_listing])
model = Model(inputs=[main_input], outputs=[stack_pred])
model.load_weights(os.path.join(save_dir, 'temp_model_s.h5'), by_name=True)
for idx in range(num_trees):
model.get_layer('hwy{}'.format(idx)).set_weights([np.array([[0.25, 0.5, 0.25]])])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
hist = model.fit(X, y, epochs=nepochs, verbose=0)
pd_temp = pd.DataFrame(hist.history)
print(pd.DataFrame(hist.history).iloc[-1])
        hist_df = pd.concat([hist_df, pd_temp])
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
        tm.assert_equal(expected.C.dtype, 'float')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import LinearSegmentedColormap
import scipy as sp
from scipy import interpolate
import patsy
import logging
from time import time
import warnings
from scipy import optimize
from scipy import linalg
from scipy import stats
from scipy.misc import derivative
from scipy.special import logsumexp
from tqdm import tqdm
def plotgene(X,mtx,draw_list,result,sp=10,lw=0.2,N=5,plotsize=5):
n = len(draw_list)
rownum = n//N + 1
plt.figure(figsize=(N*(plotsize+2),plotsize*rownum))
cmap = LinearSegmentedColormap.from_list('mycmap', ['blue','white','red'])
for i in range(n):
if draw_list[i] in list(mtx.T):
plt.subplot(rownum,N,i+1)
plt.scatter(X[:,0], X[:,1], c=mtx.T[draw_list[i]],cmap=cmap,s=sp,linewidths=lw,edgecolors='black')
plt.colorbar()
if hasattr(result, 'g'):
plt.title(draw_list[i]+' qval:'+str(round(result[result.g==draw_list[i]].qval.values[0],2)))
else:
plt.title(draw_list[i])
else:
print('not contain '+str(draw_list[i]))
def draw_agree(intersection,r1,r2,verbose=False,N=5):
r1br2=[]
r2br1=[]
all_g=[]
m1=0
m2=0
for i in intersection:
x1 = r1.index(i)
x2 = r2.index(i)
if (abs(x1-x2)>100)&verbose:
continue
plt.scatter(x1,x2)
m1 = max(m1,x1)
m2 = max(m2,x2)
if (x1-x2)>N:
r2br1.append(i)
elif (x2-x1)>N:
r1br2.append(i)
else:
all_g.append(i)
plt.annotate("(%s,%s) " %(x1,x2)+str(i), xy=(x1,x2), xytext=(-20, 10), textcoords='offset points')
plt.plot([0,m2],[N,m2+N],linestyle='-.',color='r')
plt.plot([N,m2+N],[0,m2],linestyle='-.',color='r')
plt.plot([0,m2],[10,m2+10],linestyle='-.',color='b')
plt.plot([10,m2+10],[0,m2],linestyle='-.',color='b')
plt.xlabel('original')
plt.xlim(0, m1+10)
plt.ylim(0, m2+10)
plt.ylabel('SOM')
plt.title('Rank 50'+' left_top:'+str(len(r1br2))+' right_down:'+str(len(r2br1))+' all:'+str(len(intersection)))
return r1br2,r2br1,all_g
def draw_agree_log(intersection,r1,r2,label,verbose=False,N=5,al=1000):
r1br2=[]
r2br1=[]
all_g=[]
m1=0
m2=0
x_list=[]
y_list=[]
diff=[]
plt.yscale('log')
plt.xscale('log')
plt.axis([1, al, 1, al])
for i in intersection:
x1 = r1.index(i)+1
x2 = r2.index(i)+1
x_list.append(x1)
y_list.append(x2)
diff.append(abs(x1-x2))
m1 = max(m1,x1)
m2 = max(m2,x2)
if (x1-x2)>N:
r2br1.append(i)
elif (x2-x1)>N:
r1br2.append(i)
else:
all_g.append(i)
if x1<10 and x2<10:
plt.annotate("(%s,%s) " %(x1,x2)+str(i), xy=(x1,x2), xytext=(-20, 10), textcoords='offset points')
plt.scatter(x_list,y_list,c=diff,alpha=0.5,vmin=0,vmax=400)
print(min(diff),max(diff))
plt.xlabel(label[0])
plt.ylabel(label[1])
plt.colorbar()
plt.title(label[0]+' VS '+label[1]+' all:'+str(len(intersection)))
return r1br2,r2br1,all_g
def qvalue(pv, pi0=None):
assert(pv.min() >= 0 and pv.max() <= 1), "p-values should be between 0 and 1"
original_shape = pv.shape
pv = pv.ravel() # flattens the array in place, more efficient than flatten()
m = float(len(pv))
# if the number of hypotheses is small, just set pi0 to 1
if len(pv) < 100 and pi0 is None:
pi0 = 1.0
elif pi0 is not None:
pi0 = pi0
else:
# evaluate pi0 for different lambdas
pi0 = []
        lam = np.arange(0, 0.90, 0.01)
        counts = np.array([(pv > i).sum() for i in np.arange(0, 0.9, 0.01)])
        for l in range(len(lam)):
            pi0.append(counts[l]/(m*(1-lam[l])))
        pi0 = np.array(pi0)
# fit natural cubic spline
tck = interpolate.splrep(lam, pi0, k=3)
pi0 = interpolate.splev(lam[-1], tck)
if pi0 > 1:
pi0 = 1.0
assert(pi0 >= 0 and pi0 <= 1), "pi0 is not between 0 and 1: %f" % pi0
    p_ordered = np.argsort(pv)
pv = pv[p_ordered]
qv = pi0 * m/len(pv) * pv
qv[-1] = min(qv[-1], 1.0)
for i in range(len(pv)-2, -1, -1):
qv[i] = min(pi0*m*pv[i]/(i+1.0), qv[i+1])
# reorder qvalues
qv_temp = qv.copy()
    qv = np.zeros_like(qv)
qv[p_ordered] = qv_temp
# reshape qvalues
qv = qv.reshape(original_shape)
return qv
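# A minimal usage sketch for qvalue, kept as a never-called helper (the p-values below are
# synthetic and only illustrate the call signature): q-values come back in the same
# shape/order as the input, so they can be assigned straight back onto a results DataFrame.
def _example_qvalue():
    pvals = np.random.uniform(0, 1, size=1000)
    qvals = qvalue(pvals, pi0=1.0)  # fixing pi0 = 1.0 gives Benjamini-Hochberg style q-values
    return qvals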
def get_l_limits(X):
Xsq = np.sum(np.square(X), 1)
R2 = -2. * np.dot(X, X.T) + (Xsq[:, None] + Xsq[None, :])
R2 = np.clip(R2, 0, np.inf)
R_vals = np.unique(R2.flatten())
R_vals = R_vals[R_vals > 1e-8]
l_min = np.sqrt(R_vals.min()) / 2.
l_max = np.sqrt(R_vals.max()) * 2.
return l_min, l_max
## Kernels ##
def SE_kernel(X, l):
Xsq = np.sum(np.square(X), 1)
R2 = -2. * np.dot(X, X.T) + (Xsq[:, None] + Xsq[None, :])
R2 = np.clip(R2, 1e-12, np.inf)
return np.exp(-R2 / (2 * l ** 2))
def linear_kernel(X):
K = np.dot(X, X.T)
return K / K.max()
def cosine_kernel(X, p):
''' Periodic kernel as l -> oo in [Lloyd et al 2014]
Easier interpretable composability with SE?
'''
Xsq = np.sum(np.square(X), 1)
R2 = -2. * np.dot(X, X.T) + (Xsq[:, None] + Xsq[None, :])
R2 = np.clip(R2, 1e-12, np.inf)
return np.cos(2 * np.pi * np.sqrt(R2) / p)
def gower_scaling_factor(K):
''' Gower normalization factor for covariance matric K
Based on https://github.com/PMBio/limix/blob/master/limix/utils/preprocess.py
'''
n = K.shape[0]
P = np.eye(n) - np.ones((n, n)) / n
KP = K - K.mean(0)[:, np.newaxis]
trPKP = np.sum(P * KP)
return trPKP / (n - 1)
def factor(K):
S, U = np.linalg.eigh(K)
# .clip removes negative eigenvalues
return U, np.clip(S, 1e-8, None)
def get_UT1(U):
return U.sum(0)
def get_UTy(U, y):
return y.dot(U)
def mu_hat(delta, UTy, UT1, S, n, Yvar=None):
''' ML Estimate of bias mu, function of delta.
'''
if Yvar is None:
Yvar = np.ones_like(S)
UT1_scaled = UT1 / (S + delta * Yvar)
sum_1 = UT1_scaled.dot(UTy)
sum_2 = UT1_scaled.dot(UT1)
return sum_1 / sum_2
def s2_t_hat(delta, UTy, S, n, Yvar=None):
''' ML Estimate of structured noise, function of delta
'''
if Yvar is None:
Yvar = np.ones_like(S)
UTy_scaled = UTy / (S + delta * Yvar)
return UTy_scaled.dot(UTy) / n
def LL(delta, UTy, UT1, S, n, Yvar=None):
''' Log-likelihood of GP model as a function of delta.
The parameter delta is the ratio s2_e / s2_t, where s2_e is the
observation noise and s2_t is the noise explained by covariance
in time or space.
'''
mu_h = mu_hat(delta, UTy, UT1, S, n, Yvar)
if Yvar is None:
Yvar = np.ones_like(S)
sum_1 = (np.square(UTy - UT1 * mu_h) / (S + delta * Yvar)).sum()
sum_2 = np.log(S + delta * Yvar).sum()
with np.errstate(divide='ignore'):
return -0.5 * (n * np.log(2 * np.pi) + n * np.log(sum_1 / n) + sum_2 + n)
def logdelta_prior_lpdf(log_delta):
s2p = 100.
return -np.log(np.sqrt(2 * np.pi * s2p)) - np.square(log_delta - 20.) / (2 * s2p)
def make_objective(UTy, UT1, S, n, Yvar=None):
def LL_obj(log_delta):
return -LL(np.exp(log_delta), UTy, UT1, S, n, Yvar)
return LL_obj
def brent_max_LL(UTy, UT1, S, n):
LL_obj = make_objective(UTy, UT1, S, n)
o = optimize.minimize_scalar(LL_obj, bounds=[-10, 10], method='bounded', options={'maxiter': 32})
max_ll = -o.fun
max_delta = np.exp(o.x)
max_mu_hat = mu_hat(max_delta, UTy, UT1, S, n)
max_s2_t_hat = s2_t_hat(max_delta, UTy, S, n)
return max_ll, max_delta, max_mu_hat, max_s2_t_hat
def lbfgsb_max_LL(UTy, UT1, S, n, Yvar=None):
LL_obj = make_objective(UTy, UT1, S, n, Yvar)
min_boundary = -10
max_boundary = 20.
x, f, d = optimize.fmin_l_bfgs_b(LL_obj, 0., approx_grad=True,
bounds=[(min_boundary, max_boundary)],
maxfun=64, factr=1e12, epsilon=1e-4)
max_ll = -f
max_delta = np.exp(x[0])
boundary_ll = -LL_obj(max_boundary)
if boundary_ll > max_ll:
max_ll = boundary_ll
max_delta = np.exp(max_boundary)
boundary_ll = -LL_obj(min_boundary)
if boundary_ll > max_ll:
max_ll = boundary_ll
max_delta = np.exp(min_boundary)
max_mu_hat = mu_hat(max_delta, UTy, UT1, S, n, Yvar)
max_s2_t_hat = s2_t_hat(max_delta, UTy, S, n, Yvar)
s2_logdelta = 1. / (derivative(LL_obj, np.log(max_delta), n=2) ** 2)
return max_ll, max_delta, max_mu_hat, max_s2_t_hat, s2_logdelta
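# A compact sketch of how the pieces above fit together for a single gene, kept as a
# never-called helper. X is assumed to be an (n, 2) array of spatial coordinates and y an
# (n,) expression vector -- both are illustrative assumptions, not fixed by the code above.
def _example_single_gene_fit(X, y, lengthscale=1.0):
    K = SE_kernel(X, lengthscale)        # squared-exponential covariance over locations
    U, S = factor(K)                     # eigendecomposition, reusable across genes
    UT1 = get_UT1(U)
    UTy = get_UTy(U, y)
    n = y.shape[0]
    max_ll, max_delta, max_mu, max_s2_t, s2_logdelta = lbfgsb_max_LL(UTy, UT1, S, n)
    fsv = make_FSV(UTy, S, n, gower_scaling_factor(K))(np.log(max_delta))
    return max_ll, max_delta, fsv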
def search_max_LL(UTy, UT1, S, n, num=32):
''' Search for delta which maximizes log likelihood.
'''
min_obj = np.inf
max_log_delta = np.nan
LL_obj = make_objective(UTy, UT1, S, n)
for log_delta in np.linspace(start=-10, stop=20, num=num):
cur_obj = LL_obj(log_delta)
if cur_obj < min_obj:
min_obj = cur_obj
max_log_delta = log_delta
max_delta = np.exp(max_log_delta)
max_mu_hat = mu_hat(max_delta, UTy, UT1, S, n)
max_s2_t_hat = s2_t_hat(max_delta, UTy, S, n)
max_ll = -min_obj
return max_ll, max_delta, max_mu_hat, max_s2_t_hat
def make_FSV(UTy, S, n, Gower):
def FSV(log_delta):
s2_t = s2_t_hat(np.exp(log_delta), UTy, S, n)
s2_t_g = s2_t * Gower
return s2_t_g / (s2_t_g + np.exp(log_delta) * s2_t)
return FSV
def lengthscale_fits(exp_tab, U, UT1, S, Gower, num=64):
''' Fit GPs after pre-processing for particular lengthscale
'''
results = []
n, G = exp_tab.shape
for g in tqdm(range(G), leave=False):
y = exp_tab.iloc[:, g]
UTy = get_UTy(U, y)
t0 = time()
max_reg_ll, max_delta, max_mu_hat, max_s2_t_hat, s2_logdelta = lbfgsb_max_LL(UTy, UT1, S, n)
max_ll = max_reg_ll
t = time() - t0
# Estimate standard error of Fraction Spatial Variance
FSV = make_FSV(UTy, S, n, Gower)
s2_FSV = derivative(FSV, np.log(max_delta), n=1) ** 2 * s2_logdelta
results.append({
'g': exp_tab.columns[g],
'max_ll': max_ll,
'max_delta': max_delta,
'max_mu_hat': max_mu_hat,
'max_s2_t_hat': max_s2_t_hat,
'time': t,
'n': n,
'FSV': FSV(np.log(max_delta)),
's2_FSV': s2_FSV,
's2_logdelta': s2_logdelta
})
    return pd.DataFrame(results)
import pandas as pd
from evaluate.calculator import (
RecallCalculator,
PrecisionCalculator,
EmptyReportError,
)
import pytest
from unittest.mock import patch, Mock
from evaluate.report import (
Report,
PrecisionReport,
RecallReport
)
from tests.common import create_precision_report_row
from io import StringIO
class TestPrecisionCalculator:
def test_calculatePrecision_NoReportsRaisesEmptyReportError(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(columns=columns)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
with pytest.raises(EmptyReportError):
calculator._calculate_precision_for_a_given_confidence()
def test_calculatePrecision_OneReportWithOneRowCompletelyCorrectReturnsOne(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(1.0, gt_conf=100)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
actual = calculator._calculate_precision_for_a_given_confidence()
assert actual.precision == 1.0
assert actual.true_positives == 1.0
assert actual.total == 1.0
def test_calculatePrecision_OneReportWithOneRowCompletelyIncorrectReturnsZero(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(0.0, gt_conf=100)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
actual = calculator._calculate_precision_for_a_given_confidence()
assert actual.precision == 0.0
assert actual.true_positives == 0.0
assert actual.total == 1.0
def test_calculatePrecision_OneReportWithOneRowCompletelyCorrectBelowConfThresholdRaisesEmptyReportError(
self
):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(1.0, gt_conf=10)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
confidence_threshold = 60
with pytest.raises(EmptyReportError):
calculator._calculate_precision_for_a_given_confidence(confidence_threshold)
def test_calculatePrecision_OneReportWithOneRowCompletelyCorrectEqualConfThresholdReturnsOne(
self
):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(1.0, gt_conf=60)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
confidence_threshold = 60
actual = calculator._calculate_precision_for_a_given_confidence(confidence_threshold)
assert actual.precision == 1.0
assert actual.true_positives == 1.0
assert actual.total == 1.0
def test_calculatePrecision_OneReportWithTwoRowsPartiallyCorrect(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[
create_precision_report_row(0.5, gt_conf=100),
create_precision_report_row(0.7, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
actual = calculator._calculate_precision_for_a_given_confidence()
assert actual.precision == 1.2/2
assert actual.true_positives == 1.2
assert actual.total == 2.0
def test_calculatePrecision_OneReportWithThreeRowsTwoPartiallyCorrectOneBelowThreshold(
self
):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[
create_precision_report_row(0.4, gt_conf=100),
create_precision_report_row(0.8, gt_conf=20),
create_precision_report_row(0.3, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
confidence_threshold = 80
actual = calculator._calculate_precision_for_a_given_confidence(confidence_threshold)
assert actual.precision == 0.7/2.0
assert actual.true_positives == 0.7
assert actual.total == 2.0
class TestRecallCalculator:
@patch.object(Report, Report.get_classifications_as_list.__name__, return_value=[
"unmapped", "partially_mapped", "primary_correct", "primary_incorrect",
"secondary_correct", "secondary_incorrect", "supplementary_correct",
"supplementary_incorrect"
])
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
@patch.object(RecallReport, RecallReport.get_number_of_truth_probes.__name__, return_value=8)
def test____calculate_info_wrt_truth_probes___one_classification_of_each(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
true_positives, number_of_truth_probes = RecallCalculator._calculate_info_wrt_truth_probes(report)
assert true_positives==3 and number_of_truth_probes==8
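# Of the eight mocked classifications above, exactly three end in "_correct"
# (primary_correct, secondary_correct, supplementary_correct), which is why
# true_positives is expected to be 3 out of the 8 truth probes.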
@patch.object(Report, Report.get_classifications_as_list.__name__, return_value=[
"unmapped", "partially_mapped", "primary_correct", "primary_incorrect",
"secondary_correct", "secondary_incorrect", "supplementary_correct",
"supplementary_incorrect", "partially_mapped", "partially_mapped",
"primary_correct", "primary_correct", "primary_correct",
"supplementary_incorrect", "supplementary_incorrect", "supplementary_incorrect",
"unmapped", "unmapped", "unmapped",
])
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
@patch.object(RecallReport, RecallReport.get_number_of_truth_probes.__name__, return_value=19)
def test____calculate_info_wrt_truth_probes___some_duplicated_classifications(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
true_positives, number_of_truth_probes = RecallCalculator._calculate_info_wrt_truth_probes(report)
assert true_positives == 6 and number_of_truth_probes == 19
@patch.object(RecallReport, RecallReport.get_proportion_of_allele_seqs_found_for_each_variant.__name__,
return_value=[1.0, 0.5, 0.8, 1.0, 0.9, 1.0, 0.0, 0.1, 1.0])
@patch.object(RecallReport, RecallReport.get_proportion_of_alleles_found_for_each_variant.__name__,
return_value=[0.0, 0.1, 0.2, 0.3, 1.0, 0.9, 0.8, 0.7, 0.6])
@patch.object(RecallReport, RecallReport.get_number_of_variants.__name__, return_value=20)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
def test____calculate_info_wrt_variants(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
nb_variants_where_all_allele_seqs_were_found, nb_variants_found_wrt_alleles, variants_total = \
RecallCalculator._calculate_info_wrt_variants(report)
assert nb_variants_where_all_allele_seqs_were_found == 6.3 and \
nb_variants_found_wrt_alleles == 4.6 and \
variants_total == 20
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
@patch.object(Report, Report.get_report_satisfying_confidence_threshold.__name__)
@patch.object(RecallCalculator, RecallCalculator._calculate_info_wrt_truth_probes.__name__, return_value=(5, 10))
@patch.object(RecallCalculator, RecallCalculator._calculate_info_wrt_variants.__name__, return_value=(4, 8, 10))
def test____calculate_recall_for_a_given_confidence(self, calculate_info_wrt_variants_mock,
calculate_info_wrt_truth_probes_mock,
get_report_satisfying_confidence_threshold_mock,
*other_mocks):
# setup
report_satisfying_confidence_threshold_mock = Mock()
get_report_satisfying_confidence_threshold_mock.return_value = report_satisfying_confidence_threshold_mock
report = RecallReport([pd.DataFrame()], False)
calculator = RecallCalculator(report)
recall_info_actual = calculator._calculate_recall_for_a_given_confidence(100)
get_report_satisfying_confidence_threshold_mock.assert_called_once_with(100)
calculate_info_wrt_truth_probes_mock.assert_called_once_with(report_satisfying_confidence_threshold_mock)
calculate_info_wrt_variants_mock.assert_called_once_with(report_satisfying_confidence_threshold_mock)
assert recall_info_actual.truth_probes_true_positives == 5
assert recall_info_actual.truth_probes_total == 10
assert recall_info_actual.nb_variants_where_all_allele_seqs_were_found == 4
assert recall_info_actual.nb_variants_found_wrt_alleles == 8
assert recall_info_actual.variants_total == 10
assert recall_info_actual.recall_wrt_truth_probes == 0.5
assert recall_info_actual.recall_wrt_variants_where_all_allele_seqs_were_found == 0.4
assert recall_info_actual.recall_wrt_variants_found_wrt_alleles == 0.8
@patch.object(RecallReport, RecallReport.get_proportion_of_allele_seqs_found_for_each_variant_with_nb_of_samples.__name__,
return_value=
pd.read_csv(StringIO(
"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES
0,1,3
1,0,5
2,0,7
3,1,5
4,0,5
5,1,3
6,0,5
"""
), index_col="PVID"))
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
def test___get_recall_allele_seqs_vs_nb_of_samples_report(self, *mocks):
report = RecallReport([ | pd.DataFrame() | pandas.DataFrame |
#/Library/Frameworks/Python.framework/Versions/3.6/bin/python3
#
# Author: <NAME>
# Date: 2018-09-26
#
# This script runs all the models on the Baxter dataset subset of only cancer and normal samples to predict diagnosis based on OTU data alone. It evaluates only the generalization performance of the model.
#
############################# IMPORT MODULES ##################################
import matplotlib
matplotlib.use('Agg') #use Agg backend to be able to use Matplotlib in Flux
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sympy
from scipy import interp
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from timeit import default_timer as timer
#################################################################################
############################# PRE-PROCESS DATA ##################################
# Import the module I wrote, preprocess_data.
# In this case we only need the function process_multidata, which preprocesses the shared and subsampled mothur-generated OTU table together with the metadata. This function gives us OTUs and FIT as features and diagnosis as labels.
# If we wanted to use only OTUs and not FIT as a feature, we would import the function process_data and use that instead.
#################################################################################
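# A hedged sketch of the expected call; the actual signature of
# process_SRNdata lives in preprocess_data and may differ from this guess:
# x, y = process_SRNdata(shared, meta) # hypothetical usage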
from preprocess_data import process_SRNdata
shared = pd.read_table("data/baxter.0.03.subsample.shared")
meta = | pd.read_table("data/metadata.tsv") | pandas.read_table |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 12:15:41 2018
@author: nmei
"""
import pandas as pd
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from statsmodels.graphics.factorplots import interaction_plot
import matplotlib.pyplot as plt
from scipy import stats
from utils import post_processing2,eta_squared,omega_squared,multiple_pairwise_comparison
import os
working_dir = '../results/'
pd.options.mode.chained_assignment = None
pos = pd.read_csv('../results/Pos_control.csv')
att = | pd.read_csv('../results/ATT_control.csv') | pandas.read_csv |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
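# stripID returns everything after the first occurrence of wantStrip, e.g.
# stripID("https://shop.example/products/abc123", "/products/") -> "abc123"
# (the URL above is only an illustrative placeholder, not a real shop).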
def Kklee():
shop_id = 13
name = 'kklee'
options = Options() # enable headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # temporary storage for the current page; merged into dfAll when the page changes
dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
#
# if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
options = Options() # enable headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # temporary storage for the current page; merged into dfAll when the page changes
dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
# if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
options = Options() # enable headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # temporary storage for the current page; merged into dfAll when the page changes
dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
# if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
options = Options() # enable headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # temporary storage for the current page; merged into dfAll when the page changes
dfAll = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import argparse
from collections import OrderedDict
from copy import deepcopy
from glob import glob
import itertools
import os.path as op
from os import environ
import sys
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
from scipy.signal import (
butter,
filtfilt,
)
from scipy.io import loadmat
from scipy.spatial import distance
import seaborn as sns
from sklearn.metrics import cohen_kappa_score
from datalad.api import get as datalad_get
from remodnav import (
EyegazeClassifier,
clf as CLF,
)
from remodnav.tests import utils as ut
# configure matplotlib for deterministically named SVG identifiers.
from matplotlib import rcParams
rcParams['svg.hashsalt'] = 43
#from remodnav.tests.test_labeled import load_data as load_anderson
def load_anderson(category, name):
fname = op.join(*(
('remodnav', 'remodnav', 'tests', 'data', 'anderson_etal',
'annotated_data') + \
(('fix_by_Zemblys2018',)
if name == 'UH29_img_Europe_labelled_FIX_MN.mat'
else ('data used in the article', category)
) + \
(name + ('' if name.endswith('.mat') else '.mat'),))
)
datalad_get(fname)
m = loadmat(fname)
# viewing distance
vdist = m['ETdata']['viewDist'][0][0][0][0]
screen_width = m['ETdata']['screenDim'][0][0][0][0]
screen_res = m['ETdata']['screenRes'][0][0][0][0]
px2deg = CLF.deg_per_pixel(screen_width, vdist, screen_res)
sr = float(m['ETdata']['sampFreq'][0][0][0][0])
data = np.rec.fromarrays([
m['ETdata']['pos'][0][0][:, 3],
m['ETdata']['pos'][0][0][:, 4]],
names=('x', 'y'))
data[np.logical_and(data['x'] == 0, data['y'] == 0)] = (np.nan, np.nan)
labels = m['ETdata']['pos'][0][0][:, 5]
label_remap = {
1: 'FIXA',
2: 'SACC',
3: 'PSO',
4: 'PURS',
}
events = []
ev_type = None
ev_start = None
for i in range(len(labels)):
s = labels[i]
if ev_type is None and s in label_remap.keys():
ev_type = s
ev_start = i
elif ev_type is not None and s != ev_type:
events.append(dict(
id=len(events),
label=label_remap.get(ev_type),
start_time=0.0 if ev_start is None else
float(ev_start) / sr,
end_time=float(i) / sr,
))
ev_type = s if s in label_remap.keys() else None
ev_start = i
if ev_type is not None:
events.append(dict(
id=len(events),
label=label_remap.get(ev_type),
start_time=0.0 if ev_start is None else
float(ev_start) / sr,
end_time=float(i) / sr,
))
return data, labels, events, px2deg, sr
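# load_anderson converts Andersson et al.'s per-sample label stream
# (1=FIXA, 2=SACC, 3=PSO, 4=PURS) into a list of event dicts with start/end
# times in seconds, returned alongside the raw gaze samples, the
# pixel-to-degree conversion factor and the sampling rate. A run of identical
# labels becomes one event, e.g. labels [1, 1, 1, 2, 2] at 500 Hz yield a
# FIXA event covering roughly the first three samples followed by a SACC event.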
labeled_files = {
'dots': [
'TH20_trial1_labelled_{}.mat',
'TH38_trial1_labelled_{}.mat',
'TL22_trial17_labelled_{}.mat',
'TL24_trial17_labelled_{}.mat',
'UH21_trial17_labelled_{}.mat',
'UH21_trial1_labelled_{}.mat',
'UH25_trial1_labelled_{}.mat',
'UH33_trial17_labelled_{}.mat',
'UL27_trial17_labelled_{}.mat',
'UL31_trial1_labelled_{}.mat',
'UL39_trial1_labelled_{}.mat',
],
'img': [
'TH34_img_Europe_labelled_{}.mat',
'TH34_img_vy_labelled_{}.mat',
'TL20_img_konijntjes_labelled_{}.mat',
'TL28_img_konijntjes_labelled_{}.mat',
'UH21_img_Rome_labelled_{}.mat',
'UH27_img_vy_labelled_{}.mat',
'UH29_img_Europe_labelled_{}.mat',
'UH33_img_vy_labelled_{}.mat',
'UH47_img_Europe_labelled_{}.mat',
'UL23_img_Europe_labelled_{}.mat',
'UL31_img_konijntjes_labelled_{}.mat',
'UL39_img_konijntjes_labelled_{}.mat',
'UL43_img_Rome_labelled_{}.mat',
'UL47_img_konijntjes_labelled_{}.mat',
],
'video': [
'TH34_video_BergoDalbana_labelled_{}.mat',
'TH38_video_dolphin_fov_labelled_{}.mat',
'TL30_video_triple_jump_labelled_{}.mat',
'UH21_video_BergoDalbana_labelled_{}.mat',
'UH29_video_dolphin_fov_labelled_{}.mat',
'UH47_video_BergoDalbana_labelled_{}.mat',
'UL23_video_triple_jump_labelled_{}.mat',
'UL27_video_triple_jump_labelled_{}.mat',
'UL31_video_triple_jump_labelled_{}.mat',
],
}
#make label_map global - we need it twice
label_map = {
'FIXA': 'FIX',
'FIX': 'FIX',
'SACC': 'SAC',
'ISAC': 'SAC',
'HPSO': 'PSO',
'IHPS': 'PSO',
'LPSO': 'PSO',
'ILPS': 'PSO',
'PURS': 'PUR',
}
# we need the distribution parameters of all algorithms and human coders
# in tables 3, 4, 5, 6 from Andersson et al., 2017. Well worth double-checking,
# I needed to hand-copy-paste from the paper. The summary statistics were made
# publicly available in the file # matlab_analysis_code/20150807.mat in the
# original authors GitHub repository
# (https://github.com/richardandersson/EyeMovementDetectorEvaluation/blob/0e6f82708e10b48039763aa1078696e802260674/matlab_analysis_code/20150807.mat).
# The first two entries within each value-list belong to human coders
image_params = {
"FIX": {
'alg': ['MN', 'RA', 'CDT', 'IDT', 'IKF', 'IMST', 'IHMM', 'IVT', 'NH', 'BIT'],
'mn': [248, 242, 397, 399, 174, 304, 133, 114, 258, 209],
'sd': [271, 273, 559, 328, 239, 293, 216, 204, 299, 136],
'no': [380, 369, 251, 242, 513, 333, 701, 827, 292, 423]
},
"SAC": {
'alg': ['MN', 'RA', 'EM', 'IDT', 'IKF', 'IMST', 'IHMM', 'IVT', 'NH', 'LNS'],
'mn': [30, 31, 25, 35, 62, 17, 48, 41, 50, 29],
'sd': [17, 15, 22, 15, 37, 10, 26, 22, 20, 12],
'no': [376, 372, 787, 258, 353, 335, 368, 373, 344, 390]
},
"PSO" : {
'alg': ['MN', 'RA', 'NH', 'LNS'],
'mn': [21, 21, 28, 25],
'sd': [11, 9, 13, 9],
'no': [312, 309, 237, 319]
},
"PUR": {
'alg': ['MN', 'RA'],
'mn': [363, 305],
'sd': [187, 184],
'no': [3, 16]
}
}
dots_params = {
"FIX": {
'alg': ['MN', 'RA', 'CDT', 'IDT', 'IKF', 'IMST', 'IHMM', 'IVT', 'NH', 'BIT'],
'mn': [161, 131, 60, 323, 217, 268, 214, 203, 380, 189],
'sd': [30, 99, 127, 146, 184, 140, 286, 282, 333, 113],
'no': [2, 13, 165, 8, 72, 12, 67, 71, 30, 67]
},
"SAC": {
'alg': ['MN', 'RA', 'EM', 'IDT', 'IKF', 'IMST', 'IHMM', 'IVT', 'NH', 'LNS'],
'mn': [23, 22, 17, 32, 60, 13, 41, 36, 43, 26],
'sd': [10, 11, 14, 14, 26, 5, 17, 14, 16, 11],
'no': [47, 47, 93, 10, 29, 18, 27, 28, 42, 53]
},
"PSO" : {
'alg': ['MN', 'RA', 'NH', 'LNS'],
'mn': [15, 15, 24, 20],
'sd': [5, 8, 12, 9],
'no': [33, 28, 17, 31]
},
"PUR": {
'alg': ['MN', 'RA'],
'mn': [375, 378],
'sd': [256, 364],
'no': [37, 33]
}
}
video_params = {
"FIX": {
'alg': ['MN', 'RA', 'CDT', 'IDT', 'IKF', 'IMST', 'IHMM', 'IVT', 'NH', 'BIT'],
'mn': [318, 240, 213, 554, 228, 526, 234, 202, 429, 248],
'sd': [289, 189, 297, 454, 296, 825, 319, 306, 336, 215],
'no': [67, 67, 211, 48, 169, 71, 194, 227, 83, 170]
},
"SAC": {
'alg': ['MN', 'RA', 'EM', 'IDT', 'IKF', 'IMST', 'IHMM', 'IVT', 'NH', 'LNS'],
'mn': [26, 25, 20, 24, 55, 18, 42, 36, 44, 28],
'sd': [13, 12, 16, 53, 20, 10, 18, 16, 18, 12],
'no': [116, 126, 252, 41, 107, 76, 109, 112, 1104, 122]
},
"PSO" : {
'alg': ['MN', 'RA', 'NH', 'LNS'],
'mn': [20, 17, 28, 24],
'sd': [11, 8, 13, 10],
'no': [97, 89, 78, 87]
},
"PUR": {
'alg': ['MN', 'RA'],
'mn': [521, 472],
'sd': [347, 319],
'no': [50, 68]
}
}
mri_ids = ['01', '02', '03', '04', '05', '06', '09', '10', '14', '15',
'16', '17', '18', '19', '20']
lab_ids = ['22', '23', '24', '25', '26', '27', '28', '29', '30', '31',
'32', '33', '34', '35', '36']
# this used to be within confusion(), is global now because we also need it for Kappa()
# --> defines mapping between remodnav labels (strings) and andersson labels (ints)
anderson_remap = {
'FIX': 1,
'SAC': 2,
'PSO': 3,
'PUR': 4,
}
def get_durations(events, evcodes):
events = [e for e in events if e['label'] in evcodes]
# TODO minus one sample at the end?
durations = [e['end_time'] - e['start_time'] for e in events]
return durations
def confusion(refcoder,
coder,
figures,
stats):
conditions = ['FIX', 'SAC', 'PSO', 'PUR']
#conditions = ['FIX', 'SAC', 'PSO']
plotter = 1
# initialize a maximum misclassification rate, to be referenced automatically later
max_mclf = 0
# coders are in axis labels too
#plt.suptitle('Jaccard index for movement class labeling {} vs. {}'.format(
# refcoder, coder))
for stimtype, stimlabel in (
('img', 'Images'),
('dots', 'Dots'),
('video', 'Videos')):
conf = np.zeros((len(conditions), len(conditions)), dtype=float)
jinter = np.zeros((len(conditions), len(conditions)), dtype=float)
junion = np.zeros((len(conditions), len(conditions)), dtype=float)
for fname in labeled_files[stimtype]:
labels = []
data = None
px2deg = None
sr = None
for src in (refcoder, coder):
if src in ('RA', 'MN'):
finame = fname.format(src)
if finame == 'UH29_img_Europe_labelled_MN.mat':
# pick up Zemblys fix
finame = 'UH29_img_Europe_labelled_FIX_MN.mat'
data, target_labels, target_events, px2deg, sr = \
load_anderson(stimtype, finame)
labels.append(target_labels.astype(int))
else:
clf = EyegazeClassifier(
px2deg=px2deg,
sampling_rate=sr,
)
p = clf.preproc(data)
events = clf(p)
# convert event list into anderson-style label array
l = np.zeros(labels[0].shape, labels[0].dtype)
for ev in events:
l[int(ev['start_time'] * sr):int((ev['end_time']) * sr)] = \
anderson_remap[label_map[ev['label']]]
labels.append(l)
nlabels = [len(l) for l in labels]
if len(np.unique(nlabels)) > 1:
rsout(
"% #\n% # %INCONSISTENCY Found label length mismatch "
"between coders ({}, {}) for: {}\n% #\n".format(
refcoder, coder, fname))
rsout('% Truncate labels to shorter sample: {}'.format(
nlabels))
order_idx = np.array(nlabels).argsort()
labels[order_idx[1]] = \
labels[order_idx[1]][:len(labels[order_idx[0]])]
for c1, c1label in enumerate(conditions):
for c2, c2label in enumerate(conditions):
intersec = np.sum(np.logical_and(
labels[0] == anderson_remap[c1label],
labels[1] == anderson_remap[c2label]))
union = np.sum(np.logical_or(
labels[0] == anderson_remap[c1label],
labels[1] == anderson_remap[c2label]))
jinter[c1, c2] += intersec
junion[c1, c2] += union
#if c1 == c2:
# continue
conf[c1, c2] += np.sum(np.logical_and(
labels[0] == anderson_remap[c1label],
labels[1] == anderson_remap[c2label]))
nsamples = np.sum(conf)
nsamples_nopurs = np.sum(conf[:3, :3])
# zero out diagonal for bandwidth
conf *= ((np.eye(len(conditions)) - 1) * -1)
if figures:
plt.subplot(1, 3, plotter)
sns.heatmap(
#(conf / nsamples) * 100,
jinter / junion,
square=True,
annot=True,
cmap=sns.cm.rocket_r,
xticklabels=conditions,
yticklabels=conditions,
vmin=0.0,
vmax=1.0,
)
plt.xlabel('{} labeling'.format(refcoder))
plt.ylabel('{} labeling'.format(coder))
# stats are given proper below
#plt.title('"{}" (glob. misclf-rate): {:.1f}% (w/o pursuit: {:.1f}%)'.format(
# stimtype,
# (np.sum(conf) / nsamples) * 100,
# (np.sum(conf[:3, :3]) / nsamples_nopurs) * 100))
plt.title(stimlabel)
plotter += 1
msclf_refcoder = dict(zip(conditions, conf.sum(axis=1)/conf.sum() * 100))
msclf_coder = dict(zip(conditions, conf.sum(axis=0)/conf.sum() * 100))
if stats:
# print results as LaTeX commands
label_prefix = '{}{}{}'.format(stimtype, refcoder, coder)
for key, format, value in (
('MCLF', '%.1f', (np.sum(conf) / nsamples) * 100),
('MclfWOP', '%.1f',
(np.sum(conf[:3, :3]) / nsamples_nopurs) * 100),
('FIXref', '%.0f', msclf_refcoder['FIX']),
('SACref', '%.0f', msclf_refcoder['SAC']),
('PSOref', '%.0f', msclf_refcoder['PSO']),
('SPref', '%.0f', msclf_refcoder['PUR']),
('FIXcod', '%.0f', msclf_coder['FIX']),
('SACcod', '%.0f', msclf_coder['SAC']),
('PSOcod', '%.0f', msclf_coder['PSO']),
('SPcod', '%.0f', msclf_coder['PUR'])):
rsout('\\newcommand{\\%s%s}{%s}'
% (label_prefix, key, format % value))
# update the maximum misclassification rate only if the current one is worse
if (np.sum(conf[:3, :3]) / nsamples_nopurs * 100) > max_mclf:
max_mclf = (np.sum(conf[:3, :3]) / nsamples_nopurs * 100)
# print original outputs, but make them LaTeX-safe with '%'. This
# should make it easier to check correct placements of stats in the
# table
rsout('% ### {}'.format(stimtype))
rsout('% Comparison | MCLF | MCLFw/oP | Method | Fix | Sacc | PSO | SP')
rsout('% --- | --- | --- | --- | --- | --- | --- | ---')
rsout('% {} v {} | {:.1f} | {:.1f} | {} | {:.0f} | {:.0f} | {:.0f} | {:.0f}'.format(
refcoder,
coder,
(np.sum(conf) / nsamples) * 100,
(np.sum(conf[:3, :3]) / nsamples_nopurs) * 100,
refcoder,
msclf_refcoder['FIX'],
msclf_refcoder['SAC'],
msclf_refcoder['PSO'],
msclf_refcoder['PUR'],
))
rsout('% -- | -- | -- | {} | {:.0f} | {:.0f} | {:.0f} | {:.0f}'.format(
coder,
msclf_coder['FIX'],
msclf_coder['SAC'],
msclf_coder['PSO'],
msclf_coder['PUR'],
))
return max_mclf
def mk_confusion_figures(fig, stat):
"""
small helper function to save all confusion matrices
"""
max_mclf = 0
for pair in itertools.combinations(['MN', 'RA', 'AL'], 2):
plt.figure(
# fake size to get the font size down in relation
figsize=(14, 3),
dpi=120,
frameon=False)
cur_max_mclf = confusion(pair[0],
pair[1],
fig,
stat)
plt.savefig(
op.join('img', 'confusion_{}_{}.svg'.format(*pair)),
transparent=True,
bbox_inches="tight",
metadata={'Date': None})
plt.close()
if cur_max_mclf > max_mclf:
max_mclf = cur_max_mclf
if stat:
rsout('\\newcommand{\\maxmclf}{%s}'
% ('%.1f' % max_mclf))
def quality_stats():
"""
Computes the percent of signal loss in raw data
Note: takes a while to run (30 subj. x 8 runs x 15 min rec with 1000Hz sr),
therefore, I'm just adding results here for now
and save the resulting histogram to the repository.
\newcommand{\avglosslab}{0.041005777923189775}
\newcommand{\avglossmri}{0.1507901497174581}
To include this computation in a run with Make, add this function to the
list of command invocations if the script is ran from the command line at the
end of the script.
"""
datapath_mri = op.join('data', 'raw_eyegaze', 'sub-*', 'ses-movie', 'func',
'sub-*_ses-movie_task-movie_run-*_recording-eyegaze_physio.tsv.gz')
datapath_lab = op.join('data', 'raw_eyegaze', 'sub-*', 'beh',
'sub-*_task-movie_run-*_recording-eyegaze_physio.tsv.gz')
for (data, assoc) in [(datapath_lab, 'lab'),
(datapath_mri, 'mri')]:
infiles = glob(data)
for f in infiles:
datalad_get(f)
# make sure we have 15 subjects' data
assert len(infiles) == 120
print("Currently processing data from {} sample".format(assoc))
# set sampling rate and px2deg
px2deg = 0.0266711972026 if assoc == 'lab' else 0.0185581232561
sr = 1000
# calculate percent signal loss across subjects and runs
losses = []
vels = []
for f in infiles:
data = np.recfromcsv(f,
delimiter='\t',
names=['x', 'y', 'pupil', 'frame'])
# all periods of signal loss are marked as nan in the data
signal_loss = np.sum(np.isnan(data['x'])) / len(data['x'])
losses.append(signal_loss)
velocities = cal_velocities(data=data,
px2deg=px2deg,
sr=sr)
vels.append(velocities)
print("Calculated velocities and losses for {} sample".format(assoc))
# average across signal losses in sample (mri or lab)
loss = np.nanmean(losses)
# print results as Latex command using 'assoc' as sample identifier in name
label_loss = 'avgloss{}'.format(assoc)
rsout('\\newcommand{\\%s}{%s}'
% (label_loss, loss))
# vels is a list of arrays atm
v = np.concatenate(vels).ravel()
if assoc == 'lab':
v_lab = v
elif assoc == 'mri':
v_mri = v
# plot velocities in a histogram on logscale
# create non-linear non-equal bin sizes, as x axis will be log
hist, bins, _ = plt.hist(v[~np.isnan(v)], bins=40)
plt.close()
logbins = np.logspace(1, # don't start with 0, does not make sense in logspace
np.log10(bins[-1]),
len(bins))
fig, ax = plt.subplots()
fig.set_figheight(3)
fig.set_figwidth(5)
ax.set_ylabel('frequency')
ax.set_xlabel('velocities (deg/s)')
plt.hist(v_mri[~np.isnan(v_mri)],
weights=np.zeros_like(v_mri[~np.isnan(v_mri)]) + 1. / (v_mri[~np.isnan(v_mri)]).size,
bins=logbins,
histtype='bar',
color='orangered',
alpha=0.5,
label='mri')
plt.hist(v_lab[~np.isnan(v_lab)],
weights=np.zeros_like(v_lab[~np.isnan(v_lab)]) + 1. / (v_lab[~np.isnan(v_lab)]).size,
bins=logbins,
histtype='bar',
color='darkslategrey',
alpha=0.5,
label='lab')
plt.legend(loc='upper right')
plt.xscale('log')
plt.savefig(op.join('img', 'velhist.svg'),
transparent=True,
bbox_inches="tight",
metadata={'Date': None})
def S2SRMS():
"""
A function to compute sample-to-sample RMS following the approach in
Hooge et al., 2017
(https://link.springer.com/content/pdf/10.3758/s13428-017-0955-x.pdf),
as requested by reviewer 2.
The idea behind the approach is to chunk the entire signal into windows and
to compute sample to sample root mean squared distances for all the windows.
Procedurally, in each window S2S-RMS are computed as the root average of
squared distances between consecutive samples. After the window-wise computation,
the median S2S-RMS within each "trial" (here: 15min run) is taken, and all
median values are averaged. Windows in our context were 1000ms/1000 samples.
The code below provides an implementation of this request, and the results.
As we lay out in our response to the editor and the reviewer, though, we
believe that the method reviewer 2 suggests, a precision estimation based on
sample-to-sample distances in the entire signal, is a misguided method for
professionally produced Hollywood movies. The substantial gaze control
exerted by professionally produced movies (lightning, editing, deliberate
composition; Dorr et al., 2010), and the limited spatial spread of gazes
due to the fact that Hollywood movies center the viewers gaze in the middle
of the screen (Goldstein et al., 2007) creates spatial closeness between
successive samples that is artificially elevated. The resulting precision
value underestimates the true noise, and misleads and distracts from the
other noise estimations we have provided: Raw gaze samples, velocity distributions,
percent signal loss, and references/citations of precision estimates that
are based on 13 point calibration task validations under both MRI and lab
conditions (i.e., precision estimates from dedicated precision assessments,
not from the eyetracking signal acquired during moving watching).
The results of this computation are included below. It should be obvious that
the resulting numerical precision estimates seem implausible, especially
given the obviously more unstable eyetracking signal plotted in the publication
and the about 10 times higher precision estimates (i.e., 10 times worse)
computed in the original studyforrest data publication from 13 point
calibration procedures:
OUTPUT:
Currently processing data from lab sample
\newcommand{\RMSlab}{0.05849002328619968}
Currently processing data from mri sample
\newcommand{\RMSmri}{0.061214090261903865}
Results when computing the mean instead of the median
OUTPUT:
Currently processing data from lab sample
\newcommand{\RMSlab}{0.08485199260774937}
Currently processing data from mri sample
\newcommand{\RMSmri}{0.10376796578456919}
References:
<NAME>., <NAME>., <NAME>., & <NAME>. (2010).
Variability of eye movements when viewing dynamic natural scenes.
Journal of Vision, 10(10).
https://doi.org/10.1167/10.10.28
<NAME>., <NAME>., & <NAME>. (2007).
Where people look when watching movies: Do all viewers look at the same
place? Computers in Biology and Medicine, 37(7), 957-964.
10.1016/j.compbiomed.2006.08.018
<NAME>., <NAME>., <NAME>. et al. (2016)
A studyforrest extension, simultaneous fMRI and eye gaze recordings
during prolonged natural stimulation. Sci Data 3, 160092.
https://doi.org/10.1038/sdata.2016.92
"""
# globs for all data for MRI and Lab
datapath_mri = op.join('data', 'raw_eyegaze', 'sub-*', 'ses-movie', 'func',
'sub-*_ses-movie_task-movie_run-*_recording-eyegaze_physio.tsv.gz')
datapath_lab = op.join('data', 'raw_eyegaze', 'sub-*', 'beh',
'sub-*_task-movie_run-*_recording-eyegaze_physio.tsv.gz')
# compute results for both groups sequentially, first lab, then MRI
for (data, assoc) in [(datapath_lab, 'lab'),
(datapath_mri, 'mri')]:
infiles = glob(data)
# make sure we have 15 subjects' data
assert len(infiles) == 120
print("Currently processing data from {} sample".format(assoc))
# set px2deg for conversion to visual angles and window size (chose 1000
# as a middle ground between "still computes within a few hours" and
# "create a lot of windows" (about 900 per 15 min segment).
px2deg = 0.0266711972026 if assoc == 'lab' else 0.0185581232561
window = 1000
median_distances = []
for f in infiles:
datalad_get(f)
data = np.genfromtxt(f,
delimiter='\t',
usecols=(0, 1) # get only x and y column
)
# chunk into windows
datachunks = np.array_split(data, window)
distances = []
for chunk in datachunks:
# calculate window-wise S2S-RMS, based on the euclidean distance between
# consecutive x-y-coordinates, and convert them into visual angles
dist = distance.cdist(chunk, chunk, 'euclidean').diagonal(1) * px2deg
# square the distances, average them, and square-root them
RMS = np.sqrt(np.nanmean(np.square(dist)))
distances.append(RMS)
# calculate median RMS per run (~15 minute recording, i.e., median over
# about 900 RMS values)
median = np.median(distances)
median_distances.append(median)
# average the resulting 8 median RMSs across all subjects
meanRMS = np.nanmean(median_distances)
# print results as Latex command using 'assoc' as sample identifier in name
label_RMS = 'RMS{}'.format(assoc)
rsout('\\newcommand{\\%s}{%s}'
% (label_RMS, meanRMS))
# save the results in variables
if assoc == 'lab':
RMS_lab = meanRMS
elif assoc == 'mri':
RMS_mri = meanRMS
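# Within each window the precision estimate is the usual RMS of
# sample-to-sample distances, RMS-S2S = sqrt(mean(d_i^2)), where d_i is the
# euclidean distance in degrees between consecutive gaze samples; the per-run
# median of these window-wise values is then averaged over all runs and
# subjects of a sample (lab or mri).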
def flowchart_figs():
"""
Just for future reference: This is the subset of preprocessed and raw data
used for the flowchart of the algorithm. Not to be executed.
"""
datapath = op.join('data', 'raw_eyegaze', 'sub-32', 'beh',
'sub-32_task-movie_run-1_recording-eyegaze_physio.tsv.gz')
data = np.recfromcsv(datapath,
delimiter='\t',
names=['x', 'y', 'pupil', 'frame'])
clf = EyegazeClassifier(
px2deg=0.0266711972026,
sampling_rate=1000.0)
velocities = cal_velocities(data=data, sr=1000, px2deg=0.0266711972026)
vel_subset_unfiltered = velocities[15200:17500]
p = clf.preproc(data)
# this is to illustrate PTn estimation and chunking
vel_subset = p['vel'][15200:17500]
fig, ax1 = plt.subplots()
fig.set_figheight(2)
fig.set_figwidth(7)
fig.set_dpi(120)
ax1.plot(
vel_subset,
color='black', lw=0.5)
plt.close()
# this is to illustrate preprocessing
fig, ax1 = plt.subplots()
fig.set_figheight(2)
fig.set_figwidth(7)
fig.set_dpi(120)
ax1.plot(
vel_subset,
color='black', lw=0.5)
ax1.plot(
vel_subset_unfiltered,
color='darkorange', ls='dotted', lw=0.5)
plt.close()
def _butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(
order,
normal_cutoff,
btype='low',
analog=False)
return b, a
# Here is fixation and pursuit detection on Butterworth filtered subsets
lp_cutoff_freq = 4.0
sr=1000
# let's get a data sample with no saccade
win_data = p[16600:17000]
b, a = _butter_lowpass(lp_cutoff_freq, sr)
win_data['x'] = filtfilt(b, a, win_data['x'], method='gust')
win_data['y'] = filtfilt(b, a, win_data['y'], method='gust')
filtered_vels = cal_velocities(data=win_data, sr=1000, px2deg=0.0266711972026)
fig, ax1 = plt.subplots()
fig.set_figheight(2)
fig.set_figwidth(7)
fig.set_dpi(120)
ax1.plot(
filtered_vels,
color='black', lw=0.5)
plt.close()
def cal_velocities(data, sr, px2deg):
"""Helper to calculate velocities
sr: sampling rate
px2deg: conversion factor from pixel to degree
"""
velocities = (np.diff(data['x']) ** 2 + np.diff(
data['y']) ** 2) ** 0.5
velocities *= px2deg * sr
return velocities
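# cal_velocities turns consecutive (x, y) pixel samples into angular speed:
# the euclidean step size in pixels is scaled by px2deg (degrees per pixel)
# and by the sampling rate, giving deg/s. For example, a 10 px step between
# two samples recorded at 1000 Hz with px2deg = 0.02 corresponds to
# 10 * 0.02 * 1000 = 200 deg/s. Note that the result has one element fewer
# than the input because of np.diff.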
def mk_raw_vel_trace_figures():
"""
Small helper function to plot raw velocity traces, as requested by reviewer 2
in the second revision.
"""
# use the same data as in mk_eyegaze_classification_figures()
# (no need for file retrieval, should be there)
datalad_get(op.join('data', 'raw_eyegaze'), get_data=False)
infiles = [
op.join(
'data',
'raw_eyegaze', 'sub-32', 'beh',
'sub-32_task-movie_run-5_recording-eyegaze_physio.tsv.gz'),
op.join(
'data',
'raw_eyegaze', 'sub-02', 'ses-movie', 'func',
'sub-02_ses-movie_task-movie_run-5_recording-eyegaze_physio.tsv.gz'
),
]
# we need the sampling rate for plotting in seconds and velocity calculation
sr = 1000
# load data
for i, f in enumerate(infiles):
# read data
datalad_get(f)
data = np.recfromcsv(f,
delimiter='\t',
names=['x', 'y', 'pupil', 'frame'])
# subset data. Hessels et al., 2017 display different noise levels on 4
# second time series (ref. Fig 10). That still looks a bit dense, so we
# go with 2 seconds, from start of 10sec excerpt to make it easier to
# associate the 2 sec excerpt in to its place in the 10 sec excerpt
# above
data_subset = data[15000:17000]
px2deg, ext = (0.0266711972026, 'lab') if '32' in f \
else (0.0185581232561, 'mri')
# take raw data and convert it to velocity: euclidean distance between
# successive coordinate samples. Note: no entry for first datapoint!
# Will plot all but first data point in other time series
velocities = cal_velocities(data_subset, sr, px2deg)
vel_color = 'xkcd:gunmetal'
# prepare plotting - much manual setup, quite ugly - sorry
fig, ax1 = plt.subplots()
fig.set_figheight(2)
fig.set_figwidth(7)
fig.set_dpi(120)
time_idx = np.linspace(0, len(data_subset) / sr, len(data_subset))[1:]
max_x = float(len(data_subset) / sr)
ax1.set_xlim(0, max_x)
ax1.set_xlabel('time (seconds)')
ax1.set_ylabel('coordinates')
# left y axis set to max screensize in px
ax1.set_ylim(0, 1280)
# plot gaze trajectories (not preprocessed)
ax1.plot(time_idx,
data_subset['x'][1:],
color='black', lw=1)
ax1.plot(
time_idx,
data_subset['y'][1:],
color='black', lw=1)
# right y axis shows velocity "as is" (not preprocessed)
ax2 = ax1.twinx()
ax2.set_ylabel('velocity (deg/sec)', color=vel_color)
ax2.tick_params(axis='y', labelcolor=vel_color)
#ax2.set_yscale('log') ## TODO: Log scale or not?
ax2.set_ylim(1, 2000)
ax2.plot(time_idx,
velocities,
color=vel_color, lw=1)
plt.savefig(
op.join('img', 'rawtrace_{}.svg'.format(ext)),
transparent=True,
bbox_inches="tight",
metadata={'Date': None})
plt.close()
def mk_eyegaze_classification_figures():
"""
small function to generate and save remodnav classification figures
"""
# use two examplary files (lab + MRI) used during testing as well
# hardcoding those, as I see no reason for updating them
infiles = [
op.join(
'data',
'raw_eyegaze', 'sub-32', 'beh',
'sub-32_task-movie_run-5_recording-eyegaze_physio.tsv.gz'),
op.join(
'data',
'raw_eyegaze', 'sub-02', 'ses-movie', 'func',
'sub-02_ses-movie_task-movie_run-5_recording-eyegaze_physio.tsv.gz'
),
]
# one call per file due to https://github.com/datalad/datalad/issues/3356
for f in infiles:
datalad_get(f)
for f in infiles:
# read data
data = np.recfromcsv(f,
delimiter='\t',
names=['x', 'y', 'pupil', 'frame'])
# adjust px2deg conversion factor according to datafile
pxdeg, ext = (0.0266711972026, 'lab') if '32' in f \
else (0.0185581232561, 'mri')
clf = EyegazeClassifier(
px2deg=pxdeg,
sampling_rate=1000.0)
p = clf.preproc(data)
# let's go with 10 seconds to actually see details. This particular time
# window is within the originally plotted 50s and contains missing data
# for both data types (lab & mri)
events = clf(p[15000:25000])
# we remove plotting of details in favor of plotting raw gaze and
# velocity traces with mk_raw_vel_trace_figures() as requested by reviewer 2
# in the second round of revision
#events_detail = clf(p[24500:24750])
fig = plt.figure(
# fake size to get the font size down in relation
figsize=(14, 2),
dpi=120,
frameon=False)
ut.show_gaze(
pp=p[15000:25000],
events=events,
sampling_rate=1000.0,
show_vels=True,
coord_lim=(0, 1280),
vel_lim=(0.001, 1000))
plt.savefig(
op.join('img', 'remodnav_{}.svg'.format(ext)),
transparent=True,
bbox_inches="tight",
metadata={'Date': None})
plt.close()
# plot details
fig = plt.figure(
# fake size to get the font size down in relation
figsize=(7, 2),
dpi=120,
frameon=False)
#ut.show_gaze(
# pp=p[24500:24750],
# events=events_detail,
# sampling_rate=1000.0,
# show_vels=True,
# coord_lim=(0, 1280),
# vel_lim=(0, 1000))
#plt.savefig(
# op.join('img', 'remodnav_{}_detail.svg'.format(ext)),
# transparent=True,
# bbox_inches="tight")
#plt.close()
def mk_mainseq_figures(s_mri, s_lab):
"""
plot main sequences from movie data for lab and mri subjects.
"""
datapath = op.join('data',
'studyforrest-data-eyemovementlabels',
'sub*',
# limit to a single run, otherwise the resulting
# figure becomes so complex that it needs >16GB
# RAM to turn into an image for the manuscript,
# while the visible content hardly changes
'*run-2*.tsv')
data = sorted(glob(datapath))
datalad_get(path=data)
# create dataframes for mri and lab subjects to plot in separate plots
for (ids, select_sub, ext) in [
(mri_ids, s_mri, 'mri'),
(lab_ids, s_lab, 'lab')]:
# load data from any file matching any of the subject IDs
dfs = [
pd.read_csv(f, header=0, delim_whitespace=True)
for f in data
if any('sub-{}'.format(i) in f for i in ids)
]
df = pd.concat(dfs)
# also create a dataframe for an individual subjects run
sub = op.join('data',
'studyforrest-data-eyemovementlabels',
select_sub,
'{}_task-movie_run-2_events.tsv'.format(select_sub))
sub_df = pd.read_csv(sub, header=0, delim_whitespace=True)
for d, label in (
(df, ''),
(sub_df, '_sub')):
# extract relevant event types
SACCs = d[(d.label == 'SACC') | (d.label == 'ISAC')]
PSOs = d[(d.label == 'HPSO') | (d.label == 'IHPS') | (d.label == 'LPSO') | (d.label == 'ILPS')]
fig = plt.figure(
# fake size to get the font size down in relation
figsize=(6, 4),
dpi=120,
frameon=False)
for ev, sym, color in (
(SACCs, '.', 'red'),
(PSOs, '+', 'darkblue'),
):
plt.loglog(
ev['amp'],
ev['peak_vel'],
sym,
# scale alpha down with increasing number of data points
alpha=min(0.1, 1.0 / max(0.0001, 0.002 * len(ev))),
color=color,
lw = 1,
rasterized=True
)
# cheat: custom legend to not propagate alpha into legend markers
custom_legend = [
Line2D([0], [0],
marker='.',
color='w',
markerfacecolor='red',
label='Saccade',
markersize=10),
Line2D([0], [0],
marker='P',
color='w',
markerfacecolor='darkblue',
label='PSO',
#label='Low velocity PSOs',
markersize=10),
]
plt.ylim((10.0, 1000))
plt.xlim((0.01, 40.0))
plt.legend(handles=custom_legend, loc=4)
plt.ylabel('peak velocities (deg/s)')
plt.xlabel('amplitude (deg)')
plt.savefig(
op.join(
'img',
'mainseq{}_{}.svg'.format(
label,
ext)),
transparent=True,
bbox_inches="tight",
metadata={'Date': None})
plt.close()
def RMSD(mn,
sd,
no):
"""
Compute our interpretation of the RMSD, following equation 2 in
Andersson et al., 2017
Parameters
----------
mn, sd, no
lists with mean, standard deviation, and number of events for
a given event type and stimulus type for all available algorithms.
Returns
-------
1d-array
RMSD score per "algorithm". The first two values represent the
human coders."""
per_param = []
# convert params to array
mn, sd, no = np.array(mn), np.array(sd), np.array(no)
# compute the root mean square difference between algorithm and mean
# of coders per parameter and algorithm
for l in [mn, sd, no]:
l_scaled = l / float(np.max(l))
l_alg = np.sqrt((
# all scores
# minus the average of the humans
l_scaled - np.mean(l_scaled[:2])) ** 2
)
per_param.append(l_alg)
# sum the root mean square differences per algorithm across parameters
# also give the human rater performance as the first two values
# argsorting twice gets us the ranks
return np.array(per_param).sum(axis=0).argsort().argsort()
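# The double argsort at the end converts the summed (scaled) root mean square
# differences into ranks, e.g.
# np.array([0.2, 0.5, 0.1]).argsort().argsort() -> array([1, 2, 0])
# so algorithms closer to the human coders receive lower ranks.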
def get_remodnav_params(stim_type):
"""
Function to generate distribution parameters for event types.
Used for the RMSD computation.
Parameters
----------
stim_type = one str of 'img', 'dots', 'video'
Returns
-------
a dictionary with distribution parameters for all events for the given
stim_type
"""
# iterate through stim_types
events = []
# the data files exist twice (one per coder). The raw eye gaze data in corresponding
# files should be the same, so I assume its safe to just take one coders files.
src = 'RA'
for fname in labeled_files[stim_type]:
data, target_labels, target_events, px2deg, sr = \
load_anderson(stim_type, fname.format(src))
clf = EyegazeClassifier(
px2deg=px2deg,
sampling_rate=sr,
)
p = clf.preproc(data)
events.extend(clf(p))
for ev in events:
ev['label'] = label_map[ev['label']]
durs = OrderedDict()
durs['event']=[]
durs['alg']=[]
durs['mn']=[]
durs['sd']=[]
durs['no']=[]
# iterate through relabeled event types
for ev_type in ['FIX', 'SAC', 'PUR', 'PSO']:
durations = get_durations(events, ev_type)
durs['event'].append(ev_type)
durs['mn'].append(int(np.nanmean(durations) * 1000))
durs['sd'].append(int(np.nanstd(durations) * 1000))
durs['no'].append(len(durations))
durs['alg'].append('RE')
return durs
def print_RMSD():
"""
Function to generate tables 3, 4, 5, partial 6 from Andersson et al., 2017
for use in main.tex.
"""
# I don't want to overwrite the original dicts
img = deepcopy(image_params)
dots = deepcopy(dots_params)
video = deepcopy(video_params)
event_types = ['FIX', 'SAC', 'PSO', 'PUR']
for stim in ['img', 'dots', 'video']:
durs = get_remodnav_params(stim)
dic = [img if stim == 'img' else dots if stim == 'dots' else video]
# append the parameters produced by remodnav to the other algorithms'
for ev in event_types:
for p in ['mn', 'sd', 'no', 'alg']:
# unfortunately, dic is a list now...thats why [0] is there.
# index the dicts with the position of the respective event type
dic[0][ev][p].append(durs[p][durs['event'].index(ev)])
# print results as LaTeX commands
# within a stim_type, we iterate over keys (events and params) in the nested dicts
for par in ['mn', 'sd', 'no']:
# index the values of the dist params in the nested dicts with the position
# of the respective algorithm.
for alg in dic[0][ev]['alg']:
label_prefix = '{}{}{}{}'.format(ev, stim, par, alg)
# take the value of the event and param type by indexing the dict with the position of
# the current algorithm
rsout('\\newcommand{\\%s}{%s}'
%(label_prefix, dic[0][ev][par][dic[0][ev]['alg'].index(alg)]))
# compute RMSDs for every stimulus category
for ev in event_types:
rmsd = RMSD(dic[0][ev]['mn'],
dic[0][ev]['sd'],
dic[0][ev]['no'])
# print results as LaTeX commands
algo = dic[0][ev]['alg']
for i in range(len(rmsd)):
label = 'rank{}{}{}'.format(ev, stim, algo[i])
rsout('\\newcommand{\\%s}{%s}'
%(label, rmsd[i]))
def mk_event_duration_histograms(figures):
"""
Plot the events duration distribution per movie run, per data set.
"""
# do nothing if we don't want to plot
if not figures:
return
datalad_get(op.join('data', 'studyforrest-data-eyemovementlabels'))
datapath = op.join('data',
'studyforrest-data-eyemovementlabels',
'sub*',
'*.tsv')
data = sorted(glob(datapath))
datalad_get(path=data, get_data=False)
for ds, ds_name in [(mri_ids, 'mri'), (lab_ids, 'lab')]:
dfs = [
pd.read_csv(f, header=0, delim_whitespace=True)
for f in data
if any('sub-{}'.format(i) in f for i in ds)
]
df = | pd.concat(dfs) | pandas.concat |
"""
Experiment 1: swarm tec correlation
- for various background estimation sizes and artifact keys:
- collect random days
- get dtec prof
- interpolate swarm dne at the profile points
- estimate mean and covariance between the two
"""
import numpy as np
import pandas
from ttools import io, rbf_inversion, swarm, utils, config, convert
LW = 9
def run_experiment(n, bg_est_shape, artifact_key):
start_date = np.datetime64("2014-01-01")
end_date = np.datetime64("2020-01-01")
time_range_days = (end_date - start_date).astype('timedelta64[D]').astype(int)
offsets = np.random.randint(0, time_range_days, n)
dates = start_date + offsets.astype('timedelta64[D]')
x = []
dne = []
mlat_x = []
mlat_dne = []
for date in dates:
_x, _dne, _mlat_x, _mlat_dne = run_day(date, bg_est_shape, artifact_key)
x += _x
dne += _dne
mlat_x += _mlat_x
mlat_dne += _mlat_dne
x = np.concatenate(x, axis=0)
dne = np.concatenate(dne, axis=0)
mlat_x = np.array(mlat_x)
mlat_dne = np.array(mlat_dne)
data = np.column_stack((x, dne))
mean = np.nanmean(data, axis=0)
cov = | pandas.DataFrame(data=data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
get_ipython().system('pip install numpy')
get_ipython().system('pip install pandas')
get_ipython().system('pip install matplotlib')
get_ipython().system('pip install seaborn')
get_ipython().system('pip install pandas-profiling')
get_ipython().getoutput('pip install scikit-learn')
# In[31]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[32]:
path = "https://raw.githubusercontent.com/reddyprasade/Machine-Learning-Problems-DataSets/master/Classification/Breast%20cancer%20wisconsin.csv"
# In[33]:
data = pd.read_csv(path);
print("\n \t The data frame has {0[0]} rows and {0[1]} columns. \n".format(data.shape))
# In[34]:
data.head()
# In[35]:
data.tail()
# In[36]:
data.drop(['Unnamed: 0'],axis =1, inplace =True)
# In[37]:
data.info()
# In[38]:
diagnosis_all = list(data.shape)[0]
diagnosis_categories = list(data['diagnosis'].value_counts())
print("\n \t The data has {} Rows I ahAve among diagnosis, {} malignant and {} benign.".format(diagnosis_all,
diagnosis_categories[0],
diagnosis_categories[1]))
# In[39]:
sns.countplot(diagnosis_categories)
# In[40]:
features_mean= list(data.columns[1:11])
features_mean
# In[41]:
plt.figure(figsize=(30,15))
sns.heatmap(data[features_mean].corr(),annot=True,square=True,cmap="coolwarm")
# In[42]:
from pandas.plotting import scatter_matrix
# In[43]:
color_dir = {"M":'green','B':'blue'}
colors = data['diagnosis'].map(lambda x:color_dir.get(x))
| scatter_matrix(data[features_mean],c=colors,alpha=0.9,figsize=(20,20)) | pandas.plotting.scatter_matrix |
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
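# These factories build the test functions from source text, e.g.
# _make_func_use_binop1('+') returns (via _make_func_from_text) the
# equivalent of `def test_impl(A, B): return A + B`, while
# _make_func_use_method_arg1('add') produces `return A.add(B)` instead.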
class TestSeries_ops(TestCase):
def test_series_operators_int(self):
"""Verifies using all various Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_int_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on an integer Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
def test_series_operators_float(self):
"""Verifies using all various Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_float_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on a float Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
                    # check_dtype=False because SDC implementation always returns float64 Series
                    pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
    @skip_numba_jit('operator.neg for SeriesType is not implemented yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_operators_comp_numeric(self):
"""Verifies using all various Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
                    B = pd.Series(data_right, index=index_data)
                    pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))