prompt (string, 19 – 1.03M chars) | completion (string, 4 – 2.12k chars) | api (string, 8 – 90 chars)
---|---|---|
from functools import partial
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import os
def results_plot(sol, data, depth, time, h, y_total):
'''
Not in use anymore.
Plots the fitted concentration profiles against the measured data.
:param sol: ODE solution object whose final state holds the stacked K2O and Na2O profiles
:param data: measured profile with depth, K2O, Na2O and Li2O columns
:param depth: total profile depth
:param time: diffusion time (not used in the plot itself)
:param h: spatial step size of the discretisation
:param y_total: total oxide concentration, used to infer the Li2O profile
:return: the matplotlib Axes holding the plot
'''
plt.rcParams['savefig.dpi'] = 1200
fig, ax = plt.subplots(1,1)
res = sol.y[:, -1].reshape((2, int(depth / h)))
x_pred = np.array([i*h for i in range(int(depth/h))])
K_pred = res[0, :]
Na_pred = res[1, :]
Li_pred = y_total-res[0, :] - res[1, :]
ax.plot(x_pred, K_pred, lw=1.5, c='r', label='c(K2O) ')
ax.plot(x_pred, Na_pred, lw=1.5, c='b', label='c(Na2O)')
ax.plot(x_pred, Li_pred, lw=1.5, c='m', label='c(Li2O)')
ax.scatter(data.depth[2:], data.K2O[2:], s=5, c='r', alpha=0.1, )
ax.scatter(data.depth[2:], data.Na2O[2:], s=5, c='b', alpha=0.1, )
ax.scatter(data.depth[2:], data.Li2O[2:], s=5, c='m', alpha=0.1, )
ax.set_ylim(-0.5, 20.5)
ax.set_xlabel('Depth (um)', fontsize=13)
ax.set_ylabel('concentration (mol%)', fontsize=13)
ax.legend()
ax.grid(True)
return ax
class MyTakeStep:
'''
Not in use.
Custom step-taking routine for fine control over the optimizer's behaviour (it follows the
take_step interface used by SciPy's basin-hopping); please check the official SciPy documentation for details.
'''
def __init__(self, stepsize=0.5, same_temp=1):
self.stepsize = stepsize
self.same_temp = same_temp
def __call__(self, x, *args, **kwargs):
s = self.stepsize
if self.same_temp == 1:
x[0] += np.random.uniform(-1e-4*s, 1e-4*s)
x[1:3] += np.random.uniform(-1e-1*s, 1e-1*s, x[1:3].shape)
x[3:] += np.random.uniform(-s, s, x[3:].shape)
return x
elif self.same_temp == 0:
x[0] += np.random.uniform(-1e-4*s, 1e-4*s)
x[1:3] += np.random.uniform(-1e-1 * s, 1e-1 * s, x[1:3].shape)
x[3] += np.random.uniform(-1e-4*s, 1e-4*s)
x[4:6] += np.random.uniform(-1e-1 * s, 1e-1 * s, x[4:6].shape)
x[6:] += np.random.uniform(-s, s, x[6:].shape)
return x
class MyBoundsBasin:
'''
Not in use
For fine control over the behaviour of the basin-hopping method (accept_test interface); please check
the official SciPy documentation of the basin-hopping method for details.
'''
def __init__(self, xmax, xmin, same_temp=1):
self.same_temp = same_temp
self.xmax = xmax
self.xmin = xmin
def __call__(self, **kwargs):
x = kwargs['x_new']
tmax = bool(np.all(x < self.xmax))
tmin = bool(np.all(x > self.xmin))
tdiff = True
t_surf = True
if self.same_temp == 1:
tdiff = bool(True)
t_surf = bool(x[3]/x[4] < x[5]/x[6])
elif self.same_temp == 0:
tdiff = bool(np.all(x[0:3] > x[3:6]))
t_surf = bool(x[6]/x[7] < x[8]/x[9])
return tmax and tmin and tdiff and t_surf
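# Usage sketch (not part of the original workflow): a minimal example of how take_step /
# accept_test classes like the two above plug into scipy.optimize.basinhopping. The bounds,
# the "true" parameter vector and the toy objective below are illustrative placeholders only.
def _basinhopping_usage_example():
    from scipy.optimize import basinhopping
    xmin = np.full(7, 1e-6)
    xmax = np.array([1e-3, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0])
    target = np.array([5e-4, 0.5, 0.5, 1.0, 2.0, 3.0, 1.0])      # made-up "true" parameters
    toy_objective = lambda x: float(np.sum((x - target) ** 2))   # stand-in for the real fitting residual
    res = basinhopping(toy_objective, x0=target * 1.1, niter=10,
                       take_step=MyTakeStep(stepsize=0.5, same_temp=1),
                       accept_test=MyBoundsBasin(xmax, xmin, same_temp=1))
    return res.x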
def con_diff_coeff(x):
'''
Constraints on the diffusion coefficients of a two-step ion exchange (IOX).
:param x: x=[Dk_1st, Dna_1st, Dli_1st, Dk_2nd, Dna_2nd, Dli_2nd, Ck_1st, Cna_1st, Ck_2nd, Cna_2nd];
if the temperature of the 1st IOX step is higher, each tracer diffusion coefficient of the 1st step
shall be larger than the corresponding coefficient of the 2nd step
:return: np.array([Dk_1st - Dk_2nd, Dna_1st - Dna_2nd, Dli_1st - Dli_2nd]), which must be non-negative
'''
return np.array([x[0]-x[3], x[1]-x[4], x[2]-x[5]])
def con_surf_c_st(x):
'''
surface concentration constraint when temperatures of both IOX steps are the same
:param x: x=[Dk, Dna, Dli, Ck_1st, Cna_1st, Ck_2nd, Cna_2nd],
since the ion exchange temperature is same for both steps, Dk_1st=Dk_2nd=Dk, Dna_1st=Dna_2nd=Dna
:return: Ck_2nd/Cna_2nd > Ck_1st/Cna_1st
'''
return - x[3]/x[4] + x[5]/x[6]
def con_surf_c_1st(x):
'''
surface concentration constraint on the 1st IOX step: its surface ratio Ck_1st/Cna_1st must stay below 1
:param x: x=[Dk, Dna, Dli, Ck_1st, Cna_1st, Ck_2nd, Cna_2nd]
:return: Cna_1st/Ck_1st - 1, which is positive when 1 > Ck_1st/Cna_1st
'''
return x[4]/x[3]-1
def con_surf_c_dt(x):
'''
surface concentration constraint when the temperatures of the two IOX steps are different
:param x: x=[Dk_1st, Dna_1st, Dli_1st, Dk_2nd, Dna_2nd, Dli_2nd, Ck_1st, Cna_1st, Ck_2nd, Cna_2nd]
:return: Ck_2nd/Cna_2nd > Ck_1st/Cna_1st
'''
return - x[6]/x[7] + x[8]/x[9]
def print_fun_basin(x, f, accepted, name, method):
## LOG info during basin-hopping optimization
print('for {} by {} : x= {} on minimum = {} accepted_{}'.format(name, method, x, f, int(accepted)))
def print_fun_DE(xk, convergence, name, method):
## LOG info during differential evolution optimization
print('for {} by {} : x= {} on convergence = {} '.format(name, method, xk, convergence))
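# Usage sketch (not part of the original workflow): the constraint helpers and the logging
# callback above wired into scipy.optimize.differential_evolution through NonlinearConstraint
# (requires a SciPy version that supports constraints in differential_evolution). The bounds
# and the toy objective are illustrative placeholders for the x=[Dk, Dna, Dli, Ck_1st, Cna_1st,
# Ck_2nd, Cna_2nd] layout expected by con_surf_c_st / con_surf_c_1st.
def _differential_evolution_usage_example():
    from scipy.optimize import differential_evolution, NonlinearConstraint
    bounds = [(1e-6, 1e-3)] * 3 + [(1e-2, 20.0)] * 4
    toy_objective = lambda x: float(np.sum(np.square(x)))        # stand-in for the real fitting residual
    constraints = (NonlinearConstraint(con_surf_c_st, 0.0, np.inf),
                   NonlinearConstraint(con_surf_c_1st, 0.0, np.inf))
    result = differential_evolution(toy_objective, bounds,
                                    constraints=constraints,
                                    maxiter=20, seed=0,
                                    callback=partial(print_fun_DE, name='demo', method='DE'))
    return result.x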
def shgo_bounds(xmin, xmax):
'''
Not in use
bounds for the shgo method
:param xmin: iterable of lower bounds
:param xmax: iterable of upper bounds
:return: list of (lower, upper) tuples
'''
bounds = []
for l, u in zip(xmin, xmax):
bounds.append((l, u))
return bounds
def fspro_convertor(file_path, file_name):
'''
To convert the output from FSPro (tab-separated values inside a CSV) into a regular table
:param file_path: directory containing the file
:param file_name: name of the FSPro output file
:return: the converted data with columns 'stress_x' and 'stress_y'
'''
df = pd.read_csv(os.path.join(file_path, file_name))
res = np.zeros((len(df), 2))
for i in range(len(df)):
res[i, 0] = float(df.iloc[i, 0].split('\t')[0])
res[i, 1] = float(df.iloc[i, 0].split('\t')[1])
cols = ['stress_x', 'stress_y']
res_df = | pd.DataFrame(res, columns=cols) | pandas.DataFrame |
import os
import collections
import itertools
import biom
import skbio
import ternary
import numpy as np
import pandas as pd
import matplotlib.cm as cm
from matplotlib.colors import to_hex
import matplotlib.pyplot as plt
from jinja2 import Environment, BaseLoader
import qiime2 as q2
from q2_gamma.geommed import geometric_median
from q2_feature_table import group
def simple_plot(output_dir, table: biom.Table, feature_tree: skbio.TreeNode,
metadata: q2.Metadata, case_where: str, control_where: str,
n_transects: int=10, stratify_by: str=None, mode: str='max'):
print("Data extracted")
layer_dir = os.path.join(output_dir, 'layers')
rank_dir = os.path.join(output_dir, 'ranks')
os.mkdir(layer_dir)
os.mkdir(rank_dir)
metadata = metadata.filter_ids(table.ids(axis='sample'))
case_samples = sorted(list(metadata.get_ids(case_where)))
control_samples = sorted(list(metadata.get_ids(control_where)))
get_pairs = comparisons(metadata, control_samples, case_samples, stratify_by)
table.filter(case_samples + control_samples)
table.remove_empty('observation')
features = list(table.ids(axis='observation'))
feature_tree = shear_no_prune(feature_tree, features)
print("Extraneous features removed")
for n in feature_tree.traverse():
if not n.length:
n.length = 0
tree = tree_to_array(feature_tree, mode)
print("Tree index created")
possible_transects = len(np.unique(np.asarray(tree['distances'])))
tree_length = tree['distances'][0] # root of tree
if n_transects > possible_transects:
n_transects = possible_transects
print("Only %d transects exist, using that instead" % n_transects)
transects = list(np.linspace(0, tree_length, num=n_transects))
print("Will transect at: %s" % ", ".join(map(str, transects)))
figure_gen = prepare_plot(tree_length)
figure_gen.send(None) # initialize co-routine
colors = []
points, _ = pairwise_components(table, get_pairs())
color_fig, highlight_fig, color = figure_gen.send((points, None))
color_fig.savefig(os.path.join(layer_dir, 'original.png'), transparent=True)
plt.close(color_fig)
highlight_fig.savefig(os.path.join(layer_dir, 'original.h.png'), transparent=True)
plt.close(highlight_fig)
colors.append(color)
rank_files = []
collapsed_groups = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = | Timestamp(scalar) | pandas.Timestamp |
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import pandas as pd
from scipy import stats
from pandas.api.types import is_object_dtype, is_numeric_dtype
def grubbs_remove_outlier(X, alpha):
'''
Runs a single iteration of Grubbs' statistical test: computes the Grubbs statistic and the
critical value at the given significance level, and removes the most extreme point from X
if it is flagged as an outlier.
Parameters
----------
X : list
The sample to be tested for outliers.
alpha : float
The significance level.
Returns
-------
X : list
The input with at most one outlier removed (unchanged if no outlier is detected).
'''
# Calculate the Grubbs statistic
std_dev = np.std(X)
avg_X = np.mean(X)
abs_val_minus_avg = abs(X - avg_X)
max_of_deviations = max(abs_val_minus_avg)
grubbs_value = max_of_deviations / std_dev
# Find Max Index
max_index = np.argmax(abs_val_minus_avg)
outlier = X[max_index]
# Calculate the Grubbs critical value
size = len(X)
t_dist = stats.t.ppf(1 - alpha / (2 * size), size - 2)
numerator = (size - 1) * np.sqrt(np.square(t_dist))
denominator = np.sqrt(size) * np.sqrt(size - 2 + np.square(t_dist))
grubbs_critical_value = numerator / denominator
if grubbs_value > grubbs_critical_value:
print('{} is an outlier. Grubbs value > Grubbs critical: {:.4f} > {:.4f} \n'.format(
outlier, grubbs_value, grubbs_critical_value))
#X = np.delete(X, np.where(X == outlier) )
X.remove(outlier)
return X
def grubbs(X, max_outliers, alpha=0.05, name=''):
'''
Grubbs' test, also known as the maximum normalized residual test or the extreme studentized
deviate test, detects outliers in a univariate data set assumed to come from a normally
distributed population. The test is defined for the hypotheses:
Ho: There are no outliers in the dataset
Ha: There is exactly one outlier in the dataset
Parameters
----------
X : array-like
The sample to be tested for outliers.
max_outliers : int
The maximum number of outliers this method will try to remove (one per iteration).
alpha : float
The significance level.
name : str
Optional dataset name used in the printed messages.
Returns
-------
clean_X : list
The original data with the detected outliers removed.
outliers : list
The detected outliers.
'''
init_x_len = len(X)
reach_end = False
clean_X = X.copy().tolist()
for i in range(max_outliers):
prev_len = len(clean_X)
clean_X = grubbs_remove_outlier(clean_X, alpha)
if len(clean_X) == prev_len:
# no outlier was removed in this iteration, so stop early
reach_end = True
break
if name != "":
name = " in " + name + " dataset"
else:
name = " in the dataset "
outliers = []
if (len(X) - len(clean_X) == 0):
print("we cannot reject the null hypothesis and there are no outliers" + name)
else:
print("we reject the null hypothesis and there are outliers" + name)
outliers = [value for value in X if value not in clean_X]
if reach_end:
print("Found all Grubbs outliers, " +
str(len(X) - len(clean_X)) + " outliers")
else:
print("Found some Grubbs outliers, " + str(len(X) - len(clean_X)
) + " outliers. Increase max_outliers to find more")
return clean_X, outliers
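# Usage sketch (synthetic data, not from the original code): exercising grubbs() on a roughly
# normal sample with one obvious outlier injected.
def _grubbs_usage_example():
    rng = np.random.default_rng(0)
    sample = np.append(rng.normal(loc=10.0, scale=1.0, size=50), 25.0)   # 25.0 is the planted outlier
    clean, found = grubbs(sample, max_outliers=3, alpha=0.05, name='demo')
    return clean, found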
def detect_outliers(X, factor=1.5):
# list to store outlier indices
outlier_indices = []
# Get the 1st quartile (25%)
Q1 = np.percentile(X, 25)
# Get the 3rd quartile (75%)
Q3 = np.percentile(X, 75)
# Get the Interquartile range (IQR)
IQR = Q3 - Q1
# Define our outlier step using the requested factor (1.5 is the classic IQR rule)
outlier_step = factor * IQR
return X[(X < Q1 - outlier_step) | (X > Q3 + outlier_step)]
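# Usage sketch (synthetic data, not from the original code) for the IQR rule in detect_outliers;
# it expects an indexable numeric container such as a pandas Series or numpy array.
def _detect_outliers_usage_example():
    values = pd.Series([9.8, 10.1, 10.0, 9.9, 10.2, 30.0, 10.05, 9.95])
    flagged = detect_outliers(values, factor=1.5)
    return flagged   # only the 30.0 entry falls outside Q3 + 1.5 * IQR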
def missing_values_table(df):
"""
Returns a pandas DataFrame containing the counts of missing values by column in the input DataFrame
"""
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
mis_val_table_ren_columns = mis_val_table.rename(
columns={0: 'Missing Values', 1: '% of Total Values'})
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:, 1] != -1].sort_values(
'% of Total Values', ascending=False).round(1)
# print("Your selected dataframe has " + str(df.shape[1]) + " column/s and " + str(df.shape[0]) + " row/s.\n"
# "There are " + str(mis_val_table_ren_columns.shape[0]) +
# " columns that have missing values.")
return mis_val_table_ren_columns
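# Usage sketch (hypothetical DataFrame, not from the original code) for missing_values_table.
def _missing_values_table_usage_example():
    demo = pd.DataFrame({'a': [1.0, np.nan, 3.0, np.nan],
                         'b': [1.0, 2.0, 3.0, 4.0],
                         'c': [np.nan, np.nan, np.nan, 4.0]})
    report = missing_values_table(demo)
    return report   # columns: 'Missing Values' and '% of Total Values'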
#logic preparation
def logic_preparation(x_df, no_treatment_columns = [] ):
"""
This function provides a recommendation on how to deal with each feauture's missing values based on the missing values' percentage.
"""
missing = missing_values_table(x_df).iloc[:,1]
df_missing = | pd.DataFrame(columns=['name', 'missing','treatment', 'comment' ]) | pandas.DataFrame |
import pandas as pd
import math
import sqlite3 as sql
def read_tables_info(con):
data = pd.read_sql_query('select * from tables_info',con,index_col='index')
return data
def is_table_exists(cursor,table_name):
cursor.execute('select count(*) from sqlite_master where type="table" and name="'+table_name+'"')
values = cursor.fetchall()
#print(values[0][0])
return values[0][0] == 1
def table_info(cursor,table_name):
cursor.execute('pragma table_info("'+table_name+'")')
values = cursor.fetchall()
print(values)
def read_trade_cal(con):
data = pd.read_sql_query('select * from trade_cal',con,index_col='index')
return data
def read_daily_by_date(con,sdate,edate):
sql_str = 'select * from daily where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_daily_by_tscode(con,tscode):
sql_str = 'select * from fut_daily where ts_code = "'+tscode+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_daily_by_symbol(con,symbol):
sql_str = 'select * from fut_daily where symbol = "'+symbol+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_holding_by_symbol(con,symbol):
sql_str = 'select * from fut_holding where symbol = "'+symbol+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_concept_info(con):
sql_str = 'select * from concept_info'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_concept_detail(con):
sql_str = 'select * from concept_detail'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_daily_by_tscode(con,tscode):
sql_str = 'select * from daily where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
def read_daily_basic_by_date(con,sdate,edate):
sql_str = 'select * from daily_basic where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_margin_detail_by_date(con,sdate,edate):
sql_str = 'select * from margin_detail where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_hk_hold_by_date(con,sdate,edate):
sql_str = 'select * from hk_hold where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_daily_by_date_and_tscode(con,tscode,sdate,edate):
sql_str = 'select * from daily where ts_code="'+tscode+'" and trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='trade_date')
return data
def read_daily_basic_by_tscode(con,tscode):
sql_str = 'select * from daily_basic where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
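# The readers above build SQL by concatenating strings; a hedged alternative sketch (not part of
# the original module) that uses parameter binding via pandas.read_sql_query's params argument and
# sqlite's '?' placeholders, shown for one hypothetical reader:
def read_daily_by_tscode_param(con, tscode):
    # Same query as read_daily_by_tscode, but with a bound parameter instead of concatenation.
    sql_str = 'select * from daily where ts_code = ?'
    data = pd.read_sql_query(sql_str, con, params=(tscode,))
    return data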
def find_date_need_update(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and (cal_date not in (select trade_date from daily) or cal_date not in (select trade_date from daily_basic))'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_daily(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from daily)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_daily_basic(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from daily_basic)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_adj_factor(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from adj_factor)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_block_trade(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from block_trade)'
data = | pd.read_sql_query(sql_str,con) | pandas.read_sql_query |
from casadi import *
from scipy import stats
import pandas as pd
import pyDOE as pyDoE
def plant_model_real(sens):
"""
Define the model that is meant to describe the physical system
:return: model f
"""
nx = 5
ntheta = 8
nu = 4
x = MX.sym('x', nx)
u = MX.sym('u', nu)
theta = MX.sym('theta', ntheta)
x_p = MX.sym('xp', np.shape(x)[0] * np.shape(theta)[0])
k1 = exp(theta[0] - theta[1] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
k2 = exp(theta[2] - theta[3] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
k3 = exp(theta[4] - theta[5] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
k4 = exp(theta[6] - theta[7] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
r1 = k1 * x[4] * x[0]
r2 = k2 * x[4] * x[0]
r3 = k3 * x[4] * x[1]
r4 = k4 * x[4] * x[2]
xdot = vertcat(- r1 - r2, - r3 + r1, - r4 + r2, r3 + r4, - r1 - r2 - r3 - r4) #+\
#vertcat(u[1]*0.6, 0, 0, 0, u[2]*2.4)/2 - \
#(u[1]+u[2]+u[3]) * x/2
# Quadrature
L = [] # x1 ** 2 + x2 ** 2 + 1*u1 ** 2 + 1*u2**2
# Algebraic
alg = []
# Calculate on the fly dynamic sensitivities without the need of perturbations
if sens == 'sensitivity':
xpdot = []
for i in range(np.shape(theta)[0]):
xpdot = vertcat(xpdot, jacobian(xdot, x) @ (x_p[nx * i: nx * i + nx])
+ jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [x, u, theta, x_p], [xdot, L, xpdot],
['x', 'u', 'theta', 'xp'], ['xdot', 'L', 'xpdot'])
else:
f = Function('f', [x, u, theta], [xdot, L], ['x', 'u', 'theta'], ['xdot', 'L'])
nu = u.shape
nx = x.shape
ntheta = theta.shape
return f, nu, nx, ntheta
def plant_model(sens):
"""
Define the model that is meant to describe the physical system
:return: model f
"""
nx = 5
ntheta = 8
nu = 4
x = MX.sym('x', nx)
u = MX.sym('u', nu)
theta = MX.sym('theta', ntheta)
x_p = MX.sym('xp', np.shape(x)[0] * np.shape(theta)[0])
k1 = exp(theta[0] - theta[1] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
k2 = exp(theta[2] - theta[3] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
k3 = exp(theta[4] - theta[5] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
k4 = exp(theta[6] - theta[7] * 1e4 / 8.314 * (1 / (u[0] + 273.15) - 1 / (90 + 273.15)))
r1 = k1 * x[4] * x[0]
r2 = k2 * x[4] * x[0]
r3 = k3 * x[4] * x[1]
r4 = k4 * x[4] * x[2]
xdot = vertcat(- r1 - r2, - r3 + r1, - r4 + r2, r3 + r4, - r1 - r2 - r3 - r4) #+\
#vertcat(u[1]*0.6, 0, 0, 0, u[2]*2.4)/2 - \
#(u[1]+u[2]+u[3]) * x/2
# Quadrature
L = [] # x1 ** 2 + x2 ** 2 + 1*u1 ** 2 + 1*u2**2
# Algebraic
alg = []
# Calculate on the fly dynamic sensitivities without the need of perturbations
if sens == 'sensitivity':
xpdot = []
for i in range(np.shape(theta)[0]):
xpdot = vertcat(xpdot, jacobian(xdot, x) @ (x_p[nx * i: nx * i + nx])
+ jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [x, u, theta, x_p], [xdot, L, xpdot],
['x', 'u', 'theta', 'xp'], ['xdot', 'L', 'xpdot'])
else:
f = Function('f', [x, u, theta], [xdot, L], ['x', 'u', 'theta'], ['xdot', 'L'])
nu = u.shape
nx = x.shape
ntheta = theta.shape
return f, nu, nx, ntheta
def plant_model_simple(sens):
"""
Define the model that is meant to describe the physical system
:return: model f
"""
nx = 3
ntheta = 4
nu = 2
x = MX.sym('x', nx)
u = MX.sym('u', nu)
theta = MX.sym('theta', ntheta)
x_p = MX.sym('xp', np.shape(x)[0] * np.shape(theta)[0])
k1 = exp(theta[0] - theta[1] * 1e4 / 8.314 * (1 / (u[0] + 273.15)))# - 1 / (90 + 273.15)))
k2 = exp(theta[2] - theta[3] * 1e4 / 8.314 * (1 / (u[0] + 273.15)))# - 1 / (90 + 273.15)))
r1 = k1 * x[0]
r2 = k2 * x[1]
xdot = vertcat(- r1, - r2+r1, r2) #+\
#vertcat(u[1]*0.6, 0, 0, 0, u[2]*2.4)/2 - \
#(u[1]+u[2]+u[3]) * x/2
# Quadrature
L = [] # x1 ** 2 + x2 ** 2 + 1*u1 ** 2 + 1*u2**2
# Algebraic
alg = []
# Calculate on the fly dynamic sensitivities without the need of perturbations
if sens == 'sensitivity':
xpdot = []
for i in range(np.shape(theta)[0]):
xpdot = vertcat(xpdot, jacobian(xdot, x) @ (x_p[nx * i: nx * i + nx])
+ jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [x, u, theta, x_p], [xdot, L, xpdot],
['x', 'u', 'theta', 'xp'], ['xdot', 'L', 'xpdot'])
else:
f = Function('f', [x, u, theta], [xdot, L], ['x', 'u', 'theta'], ['xdot', 'L'])
nu = u.shape
nx = x.shape
ntheta = theta.shape
return f, nu, nx, ntheta
def plant_model_real_simple(sens):
"""
Define the model that is meant to describe the physical system
:return: model f
"""
nx = 3
ntheta = 4
nu = 2
x = MX.sym('x', nx)
u = MX.sym('u', nu)
theta = MX.sym('theta', ntheta)
x_p = MX.sym('xp', np.shape(x)[0] * np.shape(theta)[0])
k1 = exp(theta[0] - theta[1] * 1e4 / 8.314 * (1 / (u[0] + 273.15)))# - 1 / (90 + 273.15)))
k2 = exp(theta[2] - theta[3] * 1e4 / 8.314 * (1 / (u[0] + 273.15)))# - 1 / (90 + 273.15)))
r1 = k1 * x[0]
r2 = k2 * x[1]
xdot = vertcat(- r1, - r2+r1, r2) #+\
#vertcat(u[1]*0.6, 0, 0, 0, u[2]*2.4)/2 - \
#(u[1]+u[2]+u[3]) * x/2
# Quadrature
L = [] # x1 ** 2 + x2 ** 2 + 1*u1 ** 2 + 1*u2**2
# Algebraic
alg = []
# Calculate on the fly dynamic sensitivities without the need of perturbations
if sens == 'sensitivity':
xpdot = []
for i in range(np.shape(theta)[0]):
xpdot = vertcat(xpdot, jacobian(xdot, x) @ (x_p[nx * i: nx * i + nx])
+ jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [x, u, theta, x_p], [xdot, L, xpdot],
['x', 'u', 'theta', 'xp'], ['xdot', 'L', 'xpdot'])
else:
f = Function('f', [x, u, theta], [xdot, L], ['x', 'u', 'theta'], ['xdot', 'L'])
nu = u.shape
nx = x.shape
ntheta = theta.shape
return f, nu, nx, ntheta
def plant_model_GP(GP, GP1, sens):
"""
Define the model that is meant to describe the physical system
:return: model f
"""
nx = 5
ntheta = 8
nu = 4
#x = SX.sym('x', nx)
u = SX.sym('u', nu)
theta = SX.sym('theta', ntheta)
s = SX.sym('s', ntheta+nu)
x_p = SX.sym('xp', nx * ntheta)
mu, vf = GP.derivatives_gp() # gp_exact_moment([], [], [], [], [*u_t[0, :].T, *theta1.T], s)
#mu_1 = mu((vertcat(u,theta)))
#vv = np.zeros([5, 5])
#for i in range(5):
# vv[i, i] = (vf(vertcat(u,theta))[i]
# + trace(diag(s) @ (0.5 * hvf[i](vertcat(u,theta)).T
# + Jmu(vertcat(u,theta))[i, :].T @ Jmu(vertcat(u,theta))[i, :])))
xdot = mu((vertcat(u, theta))) + vertcat(GP1.GP_predictor1(u)[0][0], 0)
vdot = vf((vertcat(u, theta))) + vertcat(GP1.GP_predictor1(u)[1][0], 0)#vf((vertcat(u,theta)))
# Quadrature
# Calculate on the fly dynamic sensitivities without the need of perturbations
if sens == 'sensitivity':
xpdot = []
for i in range(np.shape(theta)[0]):
xpdot = vertcat(xpdot, jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [u, theta, x_p], [xdot, xpdot, vdot],
['u', 'theta', 'xp'], ['xdot', 'xpdot', 'vdot'])
else:
f = Function('f', [u, theta], [xdot], ['u', 'theta'], ['xdot'])
nu = u.shape
ntheta = theta.shape
return f, nu, nx, ntheta
def plant_model_GP_discripancy(GP1, sens):
"""
Define the model that is meant to describe the physical system
:return: model f
"""
nx = 5
ntheta = 8
nu = 4
#x = SX.sym('x', nx)
u = SX.sym('u', nu)
theta = SX.sym('theta', ntheta)
s = SX.sym('s', ntheta+nu)
x_p = SX.sym('xp', nx * ntheta)
#mu, vf = GP.derivatives_gp() # gp_exact_moment([], [], [], [], [*u_t[0, :].T, *theta1.T], s)
#mu_1 = mu((vertcat(u,theta)))
#vv = np.zeros([5, 5])
#for i in range(5):
# vv[i, i] = (vf(vertcat(u,theta))[i]
# + trace(diag(s) @ (0.5 * hvf[i](vertcat(u,theta)).T
# + Jmu(vertcat(u,theta))[i, :].T @ Jmu(vertcat(u,theta))[i, :])))
xdot = vertcat(GP1.GP_predictor1(u)[0][0], 0)
vdot = vertcat(GP1.GP_predictor1(u)[1][0], 0)#vf((vertcat(u,theta)))
# Quadrature
# Calculate on the fly dynamic sensitivities without the need of perturbations
if sens == 'sensitivity':
xpdot = []
for i in range(np.shape(theta)[0]):
xpdot = vertcat(xpdot, jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [u, theta, x_p], [xdot, xpdot, vdot],
['u', 'theta', 'xp'], ['xdot', 'xpdot', 'vdot'])
else:
f = Function('f', [u, theta], [xdot], ['u', 'theta'], ['xdot'])
nu = u.shape
ntheta = theta.shape
return f, nu, nx, ntheta
def integrator_model(f, nu, nx, ntheta, s1, s2, dt):
"""
Constructs an integrator for the model equations in the CasADi environment, with a variable time step.
inputs: the model f, the sizes (nu, nx, ntheta), the mode flags s1 ('embedded' uses CVODES, anything
else uses a fixed-step RK4 scheme) and s2 ('sensitivity' also propagates the parametric sensitivities),
and the step length dt
outputs: F: an integrator Function mapping (x0, p = [u, theta]) to the state at the end of the step
(plus the sensitivities when enabled)
"""
M = 4 # RK4 steps per interval
DT = dt#.sym('DT')
DT1 = DT / M
X0 = SX.sym('X0', nx)
U = SX.sym('U', nu)
theta = SX.sym('theta', ntheta)
xp0 = SX.sym('xp', np.shape(X0)[0] * np.shape(theta)[0])
X = X0
Q = 0
G = 0
S = xp0
if s1 == 'embedded':
if s2 == 'sensitivity':
xdot, qj, xpdot = f(X, U, theta, xp0)
dae = {'x': vertcat(X, xp0), 'p': vertcat(U, theta), 'ode': vertcat(xdot, xpdot)}
opts = {'tf': dt} # interval length
F = integrator('F', 'cvodes', dae, opts)
elif s2 == 'identify':
xdot, qj, xpdot = f(X, U, theta, xp0)
dae = {'x': vertcat(X, xp0), 'p': vertcat(U, theta), 'ode': vertcat(xdot, xpdot)}
opts = {'tf': dt} # interval length
F = integrator('F', 'cvodes', dae, opts)
else:
xdot, qj = f(X, U, theta)
dae = {'x': vertcat(X), 'p': vertcat(U, theta), 'ode': vertcat(xdot)}
opts = {'tf': dt} # interval length
F = integrator('F', 'cvodes', dae, opts)
else:
if s2 == 'sensitivity':
for j in range(M):
k1, k1_a, k1_p = f(X, U, theta, S)
k2, k2_a, k2_p = f(X + DT1 / 2 * k1, U, theta, S + DT1 / 2 * k1_p)
k3, k3_a, k3_p = f(X + DT1 / 2 * k2, U, theta, S + DT1 / 2 * k2_p)
k4, k4_a, k4_p = f(X + DT1 * k3, U, theta, S + DT1 * k3_p)
X = X + DT1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
G = G + DT1 / 6 * (k1_a + 2 * k2_a + 2 * k3_a + k4_a)
S = S + DT1 / 6 * (k1_p + 2 * k2_p + 2 * k3_p + k4_p)
F = Function('F', [X0, U, theta, xp0], [X, G, S], ['x0', 'p', 'theta', 'xp0'], ['xf', 'g', 'xp'])
else:
for j in range(M):
k1,_ = f(X, U, theta)
k2,_ = f(X + DT1 / 2 * k1, U, theta)
k3,_ = f(X + DT1 / 2 * k2, U, theta)
k4,_ = f(X + DT1 * k3, U, theta)
X = X + DT1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
F = Function('F', [X0, vertcat(U, theta)], [X], ['x0', 'p'], ['xf'])
return F
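# Usage sketch (illustrative numbers only, mirroring the calls made in objective_pe below):
# build the simple two-reaction model, wrap it with integrator_model in 'embedded' (CVODES) mode,
# and propagate the state over one step. The initial state, controls and kinetic parameters are
# placeholders, not fitted values.
def _integrator_model_usage_example():
    f, nu, nx, ntheta = plant_model_simple('no_sensitivity')
    dt = 0.5                                   # arbitrary step length
    F = integrator_model(f, nu, nx, ntheta, 'embedded', 'no_sensitivity', dt)
    x0 = [1.0, 0.0, 0.0]                       # initial concentrations (placeholder)
    u = [80.0, 1.0]                            # temperature [C] and a flow-like control (placeholder)
    theta = [2.0, 3.0, 1.0, 4.0]               # kinetic parameters (placeholder)
    Fk = F(x0=x0, p=vertcat(DM(u), DM(theta)))
    return np.array(Fk['xf']).flatten()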
def jacobian_f(f):
"""
This function aims to compute the Jacobian of any given MX function
input: MX function
output: Jacobian
"""
F_x = f.jacobian()
return F_x
def maximum_likelihood_est(i, y, y_meas, sigma, k, ymax):
"""
Computes the weighted least-squares (negative log-likelihood) contribution of measurement k of experiment i,
with model predictions y normalised by ymax and weighted by the measurement variances sigma**2
"""
# N = 100#y_meas.shape[0]
M = y_meas.shape[1]
MLE = 0
s = 0
for j in range(M):
MLE += 0.5*(y[j]/ymax[j] - y_meas[i][j][k]) **2 /sigma[j]**2
return MLE
def construct_polynomials_basis(d, poly_type):
# Get collocation points
tau_root = np.append(0, collocation_points(d, poly_type))
# Coefficients of the collocation equation
C = np.zeros((d + 1, d + 1))
# Coefficients of the continuity equation
D = np.zeros(d + 1)
# Coefficients of the quadrature function
B = np.zeros(d + 1)
# Construct polynomial basis
for j in range(d + 1):
# Construct Lagrange polynomials to get the polynomial basis at the collocation point
p = np.poly1d([1])
for r in range(d + 1):
if r != j:
p *= np.poly1d([1, -tau_root[r]]) / (tau_root[j] - tau_root[r])
# Evaluate the polynomial at the final time to get the coefficients of the continuity equation
D[j] = p(1.0)
# Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity
# equation
pder = np.polyder(p)
for r in range(d + 1):
C[j, r] = pder(tau_root[r])
# Evaluate the integral of the polynomial to get the coefficients of the quadrature function
pint = np.polyint(p)
B[j] = pint(1.0)
return C, D, B
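# Usage sketch (not from the original code): build the degree-3 Radau collocation matrices that
# collocation() below consumes; only shapes are inspected here, no model is involved.
def _collocation_basis_usage_example():
    d = 3
    C, D, B = construct_polynomials_basis(d, 'radau')
    # C is (d+1, d+1): time derivatives of the Lagrange basis at the collocation points,
    # D has length d+1: basis values at tau = 1 (continuity), B has length d+1: quadrature weights.
    return C.shape, D.shape, B.shape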
def fim_for_single_t(xpdot1, Vold, sigma, nx, ntheta,A):
"""
Accumulates the Fisher information contribution of a single time point.
:param xpdot1: stacked state sensitivities dx/dtheta at this time point
:param Vold: Fisher information accumulated so far (prior information)
:param sigma: measurement noise terms used to weight the sensitivities (applied as inv(diag(sigma)))
:param nx, ntheta: numbers of states and parameters (A is currently unused)
:return: FIM1, the updated Fisher information matrix
"""
xpdot = xpdot1
FIM1 = Vold#np.zeros([2, 2])
for i in range(1):
xp_r = reshape(xpdot, (nx, ntheta))
# vv = np.zeros([ntheta[0], ntheta[0], 1 + N])
# for i in range(0, 1 + N):
# FIM1 += xp_r[:-1,:].T @ np.linalg.inv(np.diag(np.square(sigma[:]))) @ xp_r[:-1,:]#@ A# + np.linalg.inv(np.array([[0.01, 0, 0, 0], [0, 0.05, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0.2]]))
FIM1 += xp_r[:-1,:].T @ inv(diag((sigma[:]))) @ xp_r[:-1,:]#@ A# + np.linalg.inv(np.array([[0.01, 0, 0, 0], [0, 0.05, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0.2]]))
# FIM1 += xp_r.T @ inv(diag(sigma**2)) @ xp_r# + np.linalg.inv(np.array([[0.01, 0, 0, 0], [0, 0.05, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0.2]]))
# FIM = solve(FIM1, SX.eye(FIM1.size1()))
# [Q, R] = qr(FIM1.expand())
return FIM1# + 0.0001)
def collocation(f, d, s, nx, nu, lbx, ubx, lbw, ubw, w0, w,
lbg, ubg, g, x_meas, Xk, k_exp, m, Uk, thetak, h, C, D):
Xc = []
for j in range(d):
Xkj = MX.sym('X_' + str(s) + '_' + str(j), nx)
Xc += [Xkj]
w += [Xkj]
lbw.extend(lbx)
ubw.extend(ubx)
# ubw.extend([u_meas[k_exp][1]])
w0.extend(x_meas[k_exp, :])#, m])
# w0.extend([u_meas[k_exp][1]])
# Loop over collocation points
Xk_end = D[0] * Xk
for j in range(1, d + 1):
# Expression for the state derivative at the collocation point
xp = C[0, j] * Xk
for r in range(d):
xp = xp + C[r + 1, j] * Xc[r]
# Append collocation equations
fj, qj = f(Xc[j - 1], Uk, thetak) # Xpc[j - 1])
g += [(h * fj - xp)]
lbg.extend([-1e-9] * nx)
ubg.extend([1e-9] * nx)
# Add contribution to the end state
Xk_end = Xk_end + D[j] * Xc[j - 1]
# New NLP variable for state at end of interval
Xk = MX.sym('X_' + str(s + 1), nx)
w += [Xk]
lbw.extend(lbx)
ubw.extend(ubx) # [:-1])
# ubw.extend([u_meas[k_exp][1]])
w0.extend(x_meas[k_exp, :])#, m])
# w0.extend([u_meas[k_exp][1]])
# Add equality constraint
g += [Xk_end - Xk]
lbg.extend([-1e-9] * nx)
ubg.extend([1e-9] * nx)
return lbw, ubw, w0, w, lbg, ubg, g, Xk
def chisquare_test(chisquare_value, conf_level, dof):
ref_chisquare = stats.chi2.ppf((conf_level), dof)
p_value = 1 - stats.chi2.cdf(chisquare_value, dof)
return ref_chisquare, chisquare_value
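# Usage sketch (illustrative numbers): interpret chisquare_test by comparing the computed
# chi-square statistic with the reference value at the chosen confidence level.
def _chisquare_test_usage_example():
    ref, value = chisquare_test(chisquare_value=12.3, conf_level=0.95, dof=10)
    model_is_adequate = value <= ref   # fail to reject the model when the statistic is below the reference
    return ref, value, model_is_adequate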
def objective(f, u, V_old, N_exp, nx, n_points, nu, theta, sigma, V, c1o, c2o):
ntheta = np.shape(theta)[1]
x_meas1 = np.zeros([N_exp + 10, nx[0], n_points + 1])
xp_meas = np.zeros((ntheta * nx[0], N_exp * n_points))
dt = np.zeros([N_exp,n_points])
pp = 0
s = 0
x_init = np.zeros([N_exp,nx[0]])
for i in range(nx[0] - 1):
x_init[:N_exp, 0] = c1o * u[:N_exp, 1] / sum(u[:N_exp, i] for i in range(1, nu[0]))
x_init[:N_exp, -1] = c2o * u[:N_exp, 2] / sum(u[:N_exp, i] for i in range(1, nu[0]))
for k0 in range(N_exp):
x11 = x_init[k0, :] # change it
x_meas1[s, :, 0] = np.array(x11.T[:nx[0]])
xp1 = np.zeros([nx[0] * ntheta, 1])
dt[k0, :] = V / np.sum(u[k0, 1:])/n_points
for i in range(n_points):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'sensitivity', dt[k0, i])
Fk = F(x0=vertcat(x11, xp1), p=vertcat(u[k0, :], theta[:8].reshape((-1,))))
x11 = Fk['xf'][0:nx[0]]
xp1 = Fk['xf'][nx[0]:]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas1[s, :, i + 1] = np.array(x11.T)
xp_meas[:, pp] = np.array(xp1.T)
pp += 1
s += 1
vv1 = V_old
for k in range(N_exp * (n_points)):
xp_r = reshape(xp_meas[:, k], (nx[0], ntheta))
# vv = np.zeros([ntheta[0], ntheta[0], N])
# for i in range(0, N):
# for i in range(ntheta[0]):
# xp_r[:, i] = w_opt[i] * xp_r[:, i]
vv1 += (xp_r[:-1, :].T @ np.linalg.inv(np.diag(np.square(sigma[:]))) @ xp_r[:-1, :])
vv = np.linalg.inv(vv1)
obj = -np.log(np.linalg.det(vv1)+0.0001)#-np.linalg.eig(vv)[0].max()##-np.linalg.eig(vv)[0].max()#
return obj
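# Note (descriptive): objective() returns -log(det(FIM) + 1e-4), so minimising it maximises
# the determinant of the accumulated Fisher information matrix, i.e. a D-optimal design
# criterion for selecting the next experiment.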
def objective_pe_mcmc(theta, kwargs):#, ):
f, u, x_meas, N_exp, nx, n_points, nu, V, c1o, c2o, theta2 = kwargs#['f'], kwargs['u_meas'],\
#kwargs['x_meas'], kwargs['N_exp'],\
#kwargs['nx'], kwargs['n_points'],\
#kwargs['nu'], kwargs['V'],\
#kwargs['c1o'], kwargs['c2o']
ntheta = len(theta)
x_meas1 = np.zeros([N_exp, nx[0], n_points + 1])
xmin = np.zeros(nx[0]-1)
xmax = np.zeros(nx[0]-1)#-1)
x_meas_norm = x_meas.copy()
for i in range(nx[0]-1):
xmax[i] = np.max(x_meas[:, i, :])
if xmax[i] > 1e-9:
x_meas_norm[:, i, :] = x_meas[:, i, :]/xmax[i]
else:
x_meas_norm[:, i, :] = x_meas[:, i, :]
xmax[i] = 1.
dt = np.zeros([N_exp, n_points])
pp = 0
s = 0
x_init = np.zeros([N_exp,nx[0]])
for i in range(nx[0] - 1):
x_init[:N_exp, 0] = c1o * u[:N_exp, 1] / sum(u[:N_exp, i] for i in range(1, nu[0]))
x_init[:N_exp, -1] = c2o * u[:N_exp, 2] / sum(u[:N_exp, i] for i in range(1, nu[0]))
mle = 0
for k0 in range(N_exp):
x11 = x_init[k0, :] # change it
x_meas1[s, :, 0] = np.array(x11.T[:nx[0]])
dt[k0, :] = V / np.sum(u[k0, 1:])/n_points
for i in range(n_points):
F = integrator_model(f, nu, nx, ntheta+6, 'embedded', 'mope', dt[k0, i])
Fk = F(x0=vertcat(x11), p=vertcat(u[k0, :], [*theta[:2], *theta2]))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas1[s, :, i + 1] = np.array(x11.T)
pp += 1
mle += maximum_likelihood_est(s, x_meas1[s,:-1,i+1] , x_meas_norm, [1, 1, 1, 1], i, xmax)
s += 1
obj = -mle#np.linalg.eig(vv1)[0][0]#
return obj
def objective_pe(f, u, x_meas, N_exp, nx, n_points, nu, theta, V, c1o, c2o):
ntheta = len(theta)
x_meas1 = np.zeros([N_exp, nx[0], n_points + 1])
xmin = np.zeros(nx[0]-1)
xmax = np.zeros(nx[0]-1)#-1)
x_meas_norm = x_meas.copy()
for i in range(nx[0]-1):
xmax[i] = np.max(x_meas[:, i, :])
if xmax[i] > 1e-9:
x_meas_norm[:, i, :] = x_meas[:, i, :]/xmax[i]
else:
x_meas_norm[:, i, :] = x_meas[:, i, :]
xmax[i] = 1.
dt = np.zeros([N_exp, n_points])
pp = 0
s = 0
x_init = np.zeros([N_exp,nx[0]])
for i in range(nx[0] - 1):
x_init[:N_exp, 0] = c1o * u[:N_exp, 1] / sum(u[:N_exp, i] for i in range(1, nu[0]))
x_init[:N_exp, -1] = c2o * u[:N_exp, 2] / sum(u[:N_exp, i] for i in range(1, nu[0]))
mle = 0
for k0 in range(N_exp):
x11 = x_init[k0, :] # change it
x_meas1[s, :, 0] = np.array(x11.T[:nx[0]])
dt[k0, :] = V / np.sum(u[k0, 1:])/n_points
for i in range(n_points):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'mope', dt[k0, i])
Fk = F(x0=vertcat(x11), p=vertcat(u[k0, :], theta[:8]))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas1[s, :, i + 1] = np.array(x11.T)
pp += 1
mle += maximum_likelihood_est(s, x_meas1[s,:-1,i+1] , x_meas_norm, [1, 1, 1, 1], i, xmax)
s += 1
obj = mle#np.linalg.eig(vv1)[0][0]#
return obj
def give_data_from_exp(nu, nx, ntheta, N_exp, PC, date, file):
for i in range(1, N_exp + 1):
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Peaks and Concentrations_' + str(
i) + '.csv' # '/output_concentrations_'+str(i)+'.csv'
size = np.shape(np.array(pd.read_csv(file[0])))
xl = np.zeros([N_exp, size[0] - 1, 1]) # size[1]])
for i in range(N_exp):
xl[i, :, :] = np.array(pd.read_csv(file[i])['Concentration (mol/L)'])[1:].reshape(4, 1)
for j in range(size[0] - 1):
for k in range(1):
if xl[i, j, k] < 0:
xl[i, j, k] = 0.
for i in range(1, N_exp + 1):
if i >= 10:
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Requests_' + str(i) + '.csv'
else:
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Requests_0' + str(i) + '.csv'
size = np.shape(np.array(pd.read_csv(file[0])))
ul = np.zeros([N_exp, size[0], size[1]])
for i in range(N_exp):
ul[i, :] = np.array(pd.read_csv(file[i]))
n_points = 1
n = 1
"""
change it
"""
x_meas = np.zeros((N_exp + 30, nx[0] - 1, n_points + 1))
u_meas = np.zeros((N_exp + 30, nu[0]))
# -------------- Change the concentrations --------------#
# u[0] -----> T
#
"""
u[0] ---> T
u[1] ---> F1
u[2] ---> F2
u[3] ---> F3
x[0] ---> c1
x[1] ---> c3
x[2] ---> c4
x[3] ---> c5
x[4] ---> c2 --- NOT
"""
# ------------------------------------------------------- #
dt = np.zeros([N_exp + 30, n_points])
"""""
for i in range(N_exp):
x_meas[i, 0, :] = xl[i, 0:n * n_points + 1:n, 1].T
x_meas[i, 1:nx[0]-1, :] = xl[i, 0:n*n_points + 1:n, 3:(nx[0]-1)+2].T
#x_meas[i, -1, :] = xl[i, 0:n*n_points + 1:n, 2].T
"""""
setup = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Exp_Setup_Info_06-September-2019_11_34_19.csv'
setup1 = np.array(pd.read_csv(setup))[0]
c1o = setup1[2] # 2.03
c2o = setup1[1] # 4.17
V = setup1[0] # 2.7
for i in range(N_exp):
x_meas[i, 0, n_points] = xl[i, 0]
x_meas[i, 1, n_points] = xl[i, 3]
x_meas[i, 2, n_points] = xl[i, 2]
x_meas[i, 3, n_points] = xl[i, 1]
u_meas[i, 1] = ul[i][0][1]
u_meas[i, 2] = ul[i][0][0]
u_meas[i, 3] = ul[i][0][2]
u_meas[i, 0] = ul[i][0][-1]
x_meas[i, 0, 0] = c1o * u_meas[i, 1] / sum(u_meas[i, j] for j in range(1, nu[0]))
x_meas[i, 1, 0] = 0.
x_meas[i, 2, 0] = 0.
x_meas[i, 3, 0] = 0.
dt[i, :] = V / sum(u_meas[i, 1:]) # xl[i, n:n*n_points + 1:n, 0].T - xl[i, 0:(n)*n_points :n, 0].T
return x_meas, u_meas, V, c1o, c2o, dt
def give_data_from_exp_recal(nu, nx, ntheta, N_exp, PC, date, file, labot):
for i in range(1, N_exp + 1):
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Peaks and Concentrations_' + str(
i) + '.csv' # '/output_concentrations_'+str(i)+'.csv'
size = np.shape(np.array(pd.read_csv(file[0])))
xl = np.zeros([N_exp, size[0] - 1, 1]) # size[1]])
for i in range(N_exp):
xl[i, :, :] = np.array(pd.read_csv(file[i])['Area'])[1:].reshape(4, 1)
for j in range(size[0] - 1):
for k in range(1):
if xl[i, j, k] < 0:
xl[i, j, k] = 0.
for i in range(N_exp):
if labot == 1:
a1 = 0.4078
a2 = 0.7505
a3 = 0.1939
a4 = 0.5987
else:
a1 = 0.4117
a2 = 0.7898
a3 = 0.1967
a4 = 0.6123
c1 = np.zeros(N_exp)
for i in range(N_exp):
c1[i] = np.array(pd.read_csv(file[i])['Area'])[0]
cr = 0.101
for i in range(1, N_exp + 1):
if i >= 10:
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Requests_' + str(i) + '.csv'
else:
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Requests_0' + str(i) + '.csv'
size = np.shape(np.array(pd.read_csv(file[0])))
ul = np.zeros([N_exp, size[0], size[1]])
for i in range(N_exp):
ul[i, :] = np.array(pd.read_csv(file[i]))
n_points = 1
n = 1
"""
change it
"""
x_meas = np.zeros((N_exp + 30, nx[0] - 1, n_points + 1))
u_meas = np.zeros((N_exp + 30, nu[0]))
# -------------- Change the concentrations --------------#
# u[0] -----> T
#
"""
u[0] ---> T
u[1] ---> F1
u[2] ---> F2
u[3] ---> F3
x[0] ---> c1
x[1] ---> c3
x[2] ---> c4
x[3] ---> c5
x[4] ---> c2 --- NOT
"""
# ------------------------------------------------------- #
dt = np.zeros([N_exp + 30, n_points])
"""""
for i in range(N_exp):
x_meas[i, 0, :] = xl[i, 0:n * n_points + 1:n, 1].T
x_meas[i, 1:nx[0]-1, :] = xl[i, 0:n*n_points + 1:n, 3:(nx[0]-1)+2].T
#x_meas[i, -1, :] = xl[i, 0:n*n_points + 1:n, 2].T
"""""
setup = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Exp_Setup_Info_06-September-2019_11_34_19.csv'
setup1 = np.array(pd.read_csv(setup))[0]
c1o = setup1[2] # 2.03
c2o = setup1[1] # 4.17
V = setup1[0] # 2.7
for i in range(N_exp):
u_meas[i, 1] = ul[i][0][1]
u_meas[i, 2] = ul[i][0][0]
u_meas[i, 3] = ul[i][0][2]
u_meas[i, 0] = ul[i][0][-1]
crr = cr * u_meas[i, 1] / sum(u_meas[i, j] for j in range(1, nu[0]))
x_meas[i, 0, n_points] = 1/a1 * crr/c1[i] * xl[i, 0]
x_meas[i, 1, n_points] = 1/a2 * crr/c1[i] * xl[i, 3]
x_meas[i, 2, n_points] = 1/a3 * crr/c1[i] * xl[i, 2]
x_meas[i, 3, n_points] = 1/a4 * crr/c1[i] * xl[i, 1]
x_meas[i, 0, 0] = c1o * u_meas[i, 1] / sum(u_meas[i, j] for j in range(1, nu[0]))
x_meas[i, 1, 0] = 0.
x_meas[i, 2, 0] = 0.
x_meas[i, 3, 0] = 0.
dt[i, :] = V / sum(u_meas[i, 1:]) # xl[i, n:n*n_points + 1:n, 0].T - xl[i, 0:(n)*n_points :n, 0].T
return x_meas, u_meas, V, c1o, c2o, dt
def give_data_from_sim_simple(N_exp, true_theta):
n_points = 1
n = 1
f, nu, nx, ntheta = plant_model_real_simple([])
"""
change it
"""
Tmax = 373-273.15
Tmin = 333-273.15
Fmax = 0.008
Fmin = 0.004
u_norm = pyDoE.lhs(nu[0], N_exp)
u_meas = [Tmax-Tmin, Fmax-Fmin] * pyDoE.lhs(nu[0], N_exp) + [Tmin, Fmin]
x_meas = np.zeros((N_exp + 30, nx[0], n_points + 1))
#u_meas = np.zeros((N_exp + 30, nu[0]))
# -------------- Change the concentrations --------------#
# u[0] -----> T
#
"""
u[0] ---> T
u[1] ---> F1
u[2] ---> F2
u[3] ---> F3
x[0] ---> c1
x[1] ---> c3
x[2] ---> c4
x[3] ---> c5
x[4] ---> c2 --- NOT
"""
# ------------------------------------------------------- #
dt = np.zeros([N_exp + 32, n_points])
"""""
for i in range(N_exp):
x_meas[i, 0, :] = xl[i, 0:n * n_points + 1:n, 1].T
x_meas[i, 1:nx[0]-1, :] = xl[i, 0:n*n_points + 1:n, 3:(nx[0]-1)+2].T
#x_meas[i, -1, :] = xl[i, 0:n*n_points + 1:n, 2].T
"""""
#setup = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Exp_Setup_Info_06-September-2019_11_34_19.csv'
#setup1 = np.array(pd.read_csv(setup))[0]
#c1o = setup1[2] # 2.03
#c2o = setup1[1] # 4.17
#V = setup1[0] # 2.7
for i in range(N_exp):
#u_meas[i, 1] = ul[i][0][1]
#u_meas[i, 2] = ul[i][0][0]
#u_meas[i, 3] = ul[i][0][2]
#u_meas[i, 0] = ul[i][0][-1]
x_meas[i, 0, 0] = 2. #* u_meas[i, 1] / sum(u_meas[i, j] for j in range(1, nu[0]))
x_meas[i, 1, 0] = 0.
x_meas[i, 2, 0] = 0.
bed_length = 1.2#200 # channel length in cm
Ac = 25#4.91 * (10 ** -4)
V = Ac * bed_length# * 1e-3
dt[i, :] = V / sum(u_meas[i, 1:]) # xl[i, n:n*n_points + 1:n, 0].T - xl[i, 0:(n)*n_points :n, 0].T
x_init = np.zeros([N_exp, nx[0]])
for i in range(nx[0] - 1):
x_init[:N_exp, i] = x_meas[:N_exp, i, 0]
pp = 0
s = 0
for k0 in range(N_exp):
x11 = x_init[k0, :] # change it
for i in range(n_points):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'nope', dt[k0, i])
Fk = F(x0=vertcat(x11), p=vertcat(u_meas[k0, :],true_theta))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas[s, :, i + 1] = np.array(x11[0:nx[0]].T)
s += 1
return x_meas, u_meas, V, dt
def give_data_from_sim_update_simple(k_exp, x_meas, u_opt, dt, true_theta, c1o, c2o, V):
f, nu, nx, ntheta = plant_model_real([])
x_meas[k_exp, 0, 0] = c1o * u_opt[1] / sum(u_opt[ 1:]) # u_opt[1]/sum(u_opt[1:])
x_meas[k_exp, 1, 0] = 0.
x_meas[k_exp, 2, 0] = 0.
x_init = np.zeros([1, x_meas.shape[1]+1])
for i in range(nx[0]):
x_init[0, i] = x_meas[k_exp, i, 0]
x11 = x_init[0, :]
dt[k_exp, :] = V / sum(u_opt[1:]) # sum(u_opt[1:])#xl[0, n:n * n_points + 1:n, 0].T - xl[0, 0: n * n_points :n, 0].T
for i in range(1):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'no', dt[k_exp, i])
Fk = F(x0=vertcat(x11), p=vertcat(u_opt, true_theta))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas[k_exp,:, i+1] = np.array(x11.T)
return x_meas, dt
def give_data_from_sim_update_simple1(k_exp, u_opt, dt, true_theta, c1o, c2o, V):
f, nu, nx, ntheta = plant_model_real([])
x_meas = np.zeros([5,2])
x_meas[0, 0] = c1o * u_opt[1] / sum(u_opt[ 1:]) # u_opt[1]/sum(u_opt[1:])
x_meas[1, 0] = 0.
x_meas[2, 0] = 0.
x_init = np.zeros([1, nx[0]])
for i in range(nx[0]):
x_init[0, i] = x_meas[i, 0]
x11 = x_init[0, :]
dt[k_exp, :] = V / sum(u_opt[1:]) # sum(u_opt[1:])#xl[0, n:n * n_points + 1:n, 0].T - xl[0, 0: n * n_points :n, 0].T
for i in range(1):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'no', dt[k_exp, i])
Fk = F(x0=vertcat(x11), p=vertcat(u_opt, true_theta))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas[:, i+1] = np.array(x11.T)
return x_meas, dt
def give_data_from_sim(N_exp, PC, date, file, true_theta):
for i in range(1, N_exp + 1):
file[i - 1] = '/Users/' + PC + '/OneDrive - University College London/Leeds_working_space - ss - Exp - BO - TR/zippedRuns/' + date + '/Peaks and Concentrations_' + str(
i) + '.csv' # '/output_concentrations_'+str(i)+'.csv'
size = np.shape(np.array(pd.read_csv(file[0])))
xl = np.zeros([N_exp, size[0] - 1, 1]) # size[1]])
for i in range(N_exp):
xl[i, :, :] = np.array(pd.read_csv(file[i])['Concentration (mol/L)'])[1:].reshape(4, 1)
for j in range(size[0] - 1):
for k in range(1):
if xl[i, j, k] < 0:
xl[i, j, k] = 0.
for i in range(1, N_exp + 1):
if i >= 10:
file[i - 1] = '/Users/' + PC + '/OneDrive - University College London/Leeds_working_space - ss - Exp - BO - TR/zippedRuns/' + date + '/Requests_' + str(i) + '.csv'
else:
file[i - 1] = '/Users/' + PC + '/OneDrive - University College London/Leeds_working_space - ss - Exp - BO - TR/zippedRuns/' + date + '/Requests_0' + str(i) + '.csv'
size = np.shape(np.array(pd.read_csv(file[0])))
ul = np.zeros([N_exp, size[0], size[1]])
for i in range(N_exp):
ul[i, :] = np.array(pd.read_csv(file[i]))
n_points = 1
n = 1
f, nu, nx, ntheta = plant_model_real([])
"""
change it
"""
x_meas = np.zeros((N_exp + 30, nx[0] - 1, n_points + 1))
u_meas = np.zeros((N_exp + 30, nu[0]))
# -------------- Change the concentrations --------------#
# u[0] -----> T
#
"""
u[0] ---> T
u[1] ---> F1
u[2] ---> F2
u[3] ---> F3
x[0] ---> c1
x[1] ---> c3
x[2] ---> c4
x[3] ---> c5
x[4] ---> c2 --- NOT
"""
# ------------------------------------------------------- #
dt = np.zeros([N_exp + 32, n_points])
"""""
for i in range(N_exp):
x_meas[i, 0, :] = xl[i, 0:n * n_points + 1:n, 1].T
x_meas[i, 1:nx[0]-1, :] = xl[i, 0:n*n_points + 1:n, 3:(nx[0]-1)+2].T
#x_meas[i, -1, :] = xl[i, 0:n*n_points + 1:n, 2].T
"""""
setup = '/Users/' + PC + '/OneDrive - University College London/Leeds_working_space - ss - Exp - BO - TR/zippedRuns/' + date + '/Exp_Setup_Info_06-September-2019_11_34_19.csv'
setup1 = np.array(pd.read_csv(setup))[0]
c1o = setup1[2] # 2.03
c2o = setup1[1] # 4.17
V = setup1[0] # 2.7
for i in range(N_exp):
u_meas[i, 1] = ul[i][0][1]
u_meas[i, 2] = ul[i][0][0]
u_meas[i, 3] = ul[i][0][2]
##
u_meas[i, 0] = ul[i][0][-1]
x_meas[i, 0, 0] = c1o * u_meas[i, 1] / sum(u_meas[i, j] for j in range(1, nu[0]))
x_meas[i, 1, 0] = 0.
x_meas[i, 2, 0] = 0.
x_meas[i, 3, 0] = 0.
dt[i, :] = V / sum(u_meas[i, 1:]) # xl[i, n:n*n_points + 1:n, 0].T - xl[i, 0:(n)*n_points :n, 0].T
x_init = np.zeros([N_exp, nx[0]])
for i in range(nx[0] - 1):
x_init[:N_exp, i] = x_meas[:N_exp, i, 0]
x_init[:N_exp, -1] = c2o * u_meas[:N_exp, 2] / sum(u_meas[:N_exp, i] for i in range(1, nu[0]))
pp = 0
s = 0
for k0 in range(N_exp):
x11 = x_init[k0, :] # change it
for i in range(n_points):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'nope', dt[k0, i])
Fk = F(x0=vertcat(x11), p=vertcat(u_meas[k0, :],true_theta))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas[s, :, i + 1] = np.array(x11[0:nx[0]-1].T)
s += 1
return x_meas, u_meas, V, c1o, c2o, dt
def give_data_from_sim_update(k_exp, x_meas, u_opt, dt, true_theta, c1o, c2o, V):
f, nu, nx, ntheta = plant_model_real([])
x_meas[k_exp, 0, 0] = c1o * u_opt[1] / sum(u_opt[ 1:]) # u_opt[1]/sum(u_opt[1:])
x_meas[k_exp, 1, 0] = 0.
x_meas[k_exp, 2, 0] = 0.
x_meas[k_exp, 3, 0] = 0.
x_init = np.zeros([1, x_meas.shape[1]+1])
for i in range(nx[0]-1):
x_init[0, i] = x_meas[k_exp, i, 0]
x_init[0, -1] = c2o * u_opt[2] / sum(u_opt[i] for i in range(1,u_opt.shape[0]))
x11 = x_init[0, :]
dt[k_exp, :] = V / sum(u_opt[1:]) # sum(u_opt[1:])#xl[0, n:n * n_points + 1:n, 0].T - xl[0, 0: n * n_points :n, 0].T
for i in range(1):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'no', dt[k_exp, i])
Fk = F(x0=vertcat(x11), p=vertcat(u_opt, true_theta))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas[k_exp,:, i+1] = np.array(x11[:-1].T)
return x_meas, dt
def give_data_from_sim_update1(k_exp, u_opt, dt, true_theta, c1o, c2o, V):
f, nu, nx, ntheta = plant_model_real([])
x_meas = np.zeros([5,2])
x_meas[0, 0] = c1o * u_opt[1] / sum(u_opt[ 1:]) # u_opt[1]/sum(u_opt[1:])
x_meas[1, 0] = 0.
x_meas[2, 0] = 0.
x_meas[3, 0] = 0.
x_init = np.zeros([1, nx[0]])
for i in range(nx[0]-1):
x_init[0, i] = x_meas[i, 0]
x_init[0, -1] = c2o * u_opt[2] / sum(u_opt[i] for i in range(1,u_opt.shape[0]))
x11 = x_init[0, :]
dt[k_exp, :] = V / sum(u_opt[1:]) # sum(u_opt[1:])#xl[0, n:n * n_points + 1:n, 0].T - xl[0, 0: n * n_points :n, 0].T
for i in range(1):
F = integrator_model(f, nu, nx, ntheta, 'embedded', 'no', dt[k_exp, i])
Fk = F(x0=vertcat(x11), p=vertcat(u_opt, true_theta))
x11 = Fk['xf'][0:nx[0]]
# + np.random.multivariate_normal([0.] * nx[0], np.diag(np. square(sigma))).T
x_meas[:, i+1] = np.array(x11.T)
return x_meas, dt
def compute_rf(nu, nx, ntheta, N_exp, PC, date, file):
for i in range(1, N_exp + 1):
file[i - 1] = '/Users/' + PC + '/Dropbox/UCL/' + date + '/Peaks and Concentrations_' + str(
i) + '.csv' # '/output_concentrations_'+str(i)+'.csv'
size = np.shape(np.array( | pd.read_csv(file[0]) | pandas.read_csv |
import requests
import urllib
from requests import post
import pandas as pd
import json
import os.path
import datetime
# import requests
## Fetch records from the REDCap API and return them as a pandas DataFrame
def getData(data):
r = post("https://redcap.duke.edu/redcap/api/", data)
r.content
d = urllib.parse.urlencode(data).encode("utf-8")
req = urllib.request.Request("https://redcap.duke.edu/redcap/api/", d)
response = urllib.request.urlopen(req)
file = response.read()
result = json.loads(file)
df = | pd.DataFrame.from_records(result) | pandas.DataFrame.from_records |
import pytest
import gpmap
from epistasis import models
import numpy as np
import pandas as pd
import os
def test__genotypes_to_X(test_data):
# Make sure function catches bad genotype passes
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# Duplicated
g = list(gpm.genotype)
g.extend(g)
# not in gpmap
b = list(gpm.genotype)
b.append("stupid")
bad_genotypes = [g,b]
for bad in bad_genotypes:
with pytest.raises(ValueError):
models.base._genotypes_to_X(bad,gpm,order=1,model_type="local")
    # Sample through various model combos
allowed = {"local":set([0,1]),
"global":set([-1,1])}
for d in test_data:
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
for i in range(1,gpm.length+1,1):
for model_type in ["local","global"]:
X = models.base._genotypes_to_X(gpm.genotype,
gpm,
order=i,
model_type=model_type)
assert X.shape[0] == len(gpm.genotype)
assert set(np.unique(X)).issubset(allowed[model_type])
def test_arghandler_decorator():
class Yo:
def _a(self,data=5,method=None):
return data
def _b(self,data=None,method=None):
return 6
@models.base.arghandler
def test_method(self,a=None,b=None,**kwargs):
return a, b
@models.base.arghandler
def bad_method(self,c=None,d=None,**kwargs):
return c, d
yo = Yo()
assert yo.test_method() == (None,6)
assert yo.test_method(a=5) == (5,6)
assert yo.test_method(a=10) == (10,6)
assert yo.test_method(b=10) == (None,6)
with pytest.raises(AttributeError):
yo.bad_method()
### Tests for AbstractModel:
# AbstractModel cannot be instantiated on its own, as it is designed to be a
# mixin with sklearn classes. Many methods have to be defined in subclass
# (.fit, .predict, etc.) These will not be tested here, but instead in the
# subclass tests. For methods defined here that are never redefined in subclass
# (._X, .add_gpm, etc.) we test using the simplest mixed/subclass
# (EpistasisLinearRegression).
def test_abstractmodel_predict_to_df(test_data):
"""
Test basic functionality. Real test of values will be done on .predict
for subclasses.
"""
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
# This should fail -- no fit run
with pytest.raises(Exception):
df = m.predict_to_df()
m.fit()
# This should work
df = m.predict_to_df()
assert type(df) is type(pd.DataFrame())
assert len(df) == len(d["genotype"])
# Create and fit a new model.
m = models.linear.EpistasisLinearRegression()
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# No gpm added -- should fail
with pytest.raises(RuntimeError):
m.predict_to_df()
m.add_gpm(gpm)
m.fit()
df = m.predict_to_df(genotypes=d["genotype"][0])
assert len(df) == 1
bad_stuff = [1,{},[1,2],"STUPID",["STUPID","IS","REAL"]]
for b in bad_stuff:
with pytest.raises(ValueError):
print(f"Trying bad genotypes {b}")
m.predict_to_df(genotypes=b)
df = m.predict_to_df(genotypes=d["genotype"][:3])
assert len(df) == 3
def test_abstractmodel_predict_to_csv(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
csv_file = os.path.join(tmp_path,"tmp.csv")
m.predict_to_csv(filename=csv_file)
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == len(d["genotype"])
# Make sure genotypes pass works
m.predict_to_csv(filename=csv_file,genotypes=d["genotype"][0])
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == 1
def test_abstractmodel_predict_to_excel(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
excel_file = os.path.join(tmp_path,"tmp.xlsx")
m.predict_to_excel(filename=excel_file)
assert os.path.exists(excel_file)
df = | pd.read_excel(excel_file) | pandas.read_excel |
print(__doc__)
# ref: http://www.agcross.com/blog/2015/02/05/random-forests-in-python-with-scikit-learn/
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.datasets import load_iris
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['is_train'] = np.random.uniform(0, 1, len(df)) <= .75
df['species'] = | pd.Categorical.from_codes(iris.target, iris.target_names) | pandas.Categorical.from_codes |
'''
Copyright (c) 2019. IIP Lab, Wuhan University
Created by <NAME>, <NAME> and <NAME>, All rights reserved
'''
import os
import argparse
import numpy as np
import pandas as pd
p_train = 0.8
p_val = 0.2
total_num = 10287
def split_data(total_num, split_idx):
'''
get the indexes for the train, val
and test data
'''
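    # With p_train = 0.8 the shuffled indexes are split into roughly 64% train
    # (0.8 * 0.8), 16% validation (0.8 * 0.2) and the remaining 20% test.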
data_idxes = np.random.permutation(total_num)
train_num = int(total_num * p_train * p_train)
val_num = int(total_num * p_train * (1 - p_train))
train_idxes = data_idxes[ :train_num]
val_idxes = data_idxes[train_num: train_num + val_num]
test_idxes = data_idxes[train_num + val_num: ]
if not os.path.exists("{}".format(split_idx)):
os.makedirs("{}".format(split_idx))
pd.DataFrame({"train_idxes" : train_idxes}).to_csv("{}/train.txt".format(split_idx) , header=None, index=False)
pd.DataFrame({"val_idxes" : val_idxes}).to_csv("{}/val.txt".format(split_idx) , header=None, index=False)
| pd.DataFrame({"test_idxes": test_idxes}) | pandas.DataFrame |
import datetime
import numpy as np
import pandas as pd
from poor_trader import chart
from poor_trader import utils
TRADE_DAYS_PER_YEAR = 244
def SQN(df_trades):
"""
System Quality Number = (Expectancy / Standard Deviation R) * sqrt(Number of Trades)
:param df_trades:
:return:
"""
try:
sqn = (df_trades.LastRMultiple.mean() / df_trades.LastRMultiple.std()) * np.sqrt(len(df_trades.index.values))
return np.round(sqn, 2)
except:
return 0
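# Illustrative worked example (hypothetical numbers, not called anywhere): five trades with
# R-multiples [0.5, -1.0, 2.0, 1.0, -0.5] have mean 0.4 and sample standard deviation ~1.19,
# so SQN = (0.4 / 1.19) * sqrt(5), which rounds to about 0.75.
def _sqn_example():
    df = pd.DataFrame({'LastRMultiple': [0.5, -1.0, 2.0, 1.0, -0.5]})
    return SQN(df)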
def drawdown(equities):
return -np.round(equities.max() - equities[-1], 4)
def drawdown_pct(equities):
dd = equities[-1] - equities.max()
dd_pct = 100 * dd / equities.max()
return np.round(dd_pct, 2)
def exposure(open_trades, portfolio_equity):
return open_trades['LastValue'].apply(lambda last_value: 100 * last_value / portfolio_equity)
def exposure_pct(df_trades, df_backtest, starting_capital):
df = pd.DataFrame()
def calc(row):
date = row.name
cur_trades = backtest.update_open_trades_last_value(df_trades[df_trades['StartDate'] <= date], date=date)
portfolio_equity = starting_capital + cur_trades['LastPnL'].sum()
open_trades = cur_trades[pd.isnull(cur_trades['EndDate'])]
return open_trades['LastPnL'].sum() / portfolio_equity
df['Exposure'] = df_backtest.apply(calc, axis=1)
return df['Exposure']
def avg_expectancy(df_trades):
return df_trades['LastPnL'].mean()
def avg_expectancy_pct(df_trades):
expectancy_pct = 100 * df_trades['LastPnL'] / df_trades['BuyValue']
return expectancy_pct.mean()
def avg_bars_held(df_backtest, df_trades):
bars_held = df_trades.apply(lambda trade: len(df_backtest.loc[pd.to_datetime(trade['StartDate']):pd.to_datetime(trade['LastRecordDate'])].index.values), axis=1)
bars_held = bars_held.dropna()
if bars_held.empty:
return 0
return np.round(bars_held.mean(), 2)
def max_drawdown(df_backtest):
return df_backtest['Equity'].expanding().apply(drawdown).min()
def max_pct_drawdown(df_backtest):
return df_backtest['Equity'].expanding().apply(drawdown_pct).min()
def ulcer_index(df_backtest):
df_dd = df_backtest['Equity'].expanding().apply(drawdown_pct)
squared_dd = df_dd * df_dd
return np.sqrt(squared_dd.sum()) / squared_dd.count()
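# Note (descriptive): the ulcer index above is computed as sqrt(sum(dd_pct^2)) / n, where
# dd_pct is the running percentage drawdown of the equity curve, so deep or prolonged
# drawdowns increase the index sharply.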
def performance_data(starting_capital, df_backtest, df_trades, index='Performance'):
df = pd.DataFrame()
equities = df_backtest['Equity'].values
years = len(equities) / TRADE_DAYS_PER_YEAR
ending_capital = df_backtest['Equity'].values[-1]
net_profit = ending_capital - starting_capital
net_profit_pct = 100 * net_profit / starting_capital
annualized_gain = ((ending_capital/starting_capital)**(1/years) - 1)
max_system_dd = max_drawdown(df_backtest)
max_system_pct_dd = max_pct_drawdown(df_backtest)
max_peak = df_backtest.Equity.max()
df_winning_trades = df_trades[df_trades['LastPnL'] > 0]
df_losing_trades = df_trades[df_trades['LastPnL'] <= 0]
ui = ulcer_index(df_backtest)
avg_bars_held_value = avg_bars_held(df_backtest, df_trades)
avg_expectancy_pct_value = avg_expectancy_pct(df_trades)
risk_free_rate = 0.01
df.loc[index, 'Number of Trading Days'] = df_backtest.Equity.count()
df.loc[index, 'Starting Capital'] = starting_capital
df.loc[index, 'Ending Capital'] = ending_capital
df.loc[index, 'Net Profit'] = net_profit
df.loc[index, 'Net Profit %'] = net_profit_pct
df.loc[index, 'SQN'] = SQN(df_trades)
df.loc[index, 'Annualized Gain'] = annualized_gain
df.loc[index, 'Max Profit'] = df_trades.LastPnL.max()
df.loc[index, 'Max Loss'] = df_trades.LastPnL.min()
df.loc[index, 'Number of Trades'] = len(df_trades.index.values)
df.loc[index, 'Winning Trades'] = len(df_winning_trades.index.values)
df.loc[index, 'Losing Trades'] = len(df_losing_trades.index.values)
try:
df.loc[index, 'Winning Trades %'] = np.round(100 * (len(df_winning_trades.index.values) / len(df_trades.index.values)), 2)
except:
df.loc[index, 'Winning Trades %'] = 0
df.loc[index, 'Avg Profit/Loss'] = avg_expectancy(df_trades)
df.loc[index, 'Avg Profit'] = avg_expectancy(df_winning_trades)
df.loc[index, 'Avg Loss'] = avg_expectancy(df_losing_trades)
df.loc[index, 'Avg Profit/Loss %'] = avg_expectancy_pct_value
df.loc[index, 'Avg Profit %'] = avg_expectancy_pct(df_winning_trades)
df.loc[index, 'Avg Loss %'] = avg_expectancy_pct(df_losing_trades)
df.loc[index, 'Avg Bars Held'] = avg_bars_held_value
df.loc[index, 'Avg Winning Bars Held'] = avg_bars_held(df_backtest, df_winning_trades)
df.loc[index, 'Avg Losing Bars Held'] = avg_bars_held(df_backtest, df_losing_trades)
df.loc[index, 'Max System Drawdown'] = max_system_dd
df.loc[index, 'Max System % Drawdown'] = max_system_pct_dd
df.loc[index, 'Max Peak'] = max_peak
df.loc[index, 'Recovery Factor'] = net_profit / abs(max_system_pct_dd)
try:
df.loc[index, 'Profit Factor'] = df_winning_trades['LastPnL'].sum() / abs(df_losing_trades['LastPnL'].sum())
except:
df.loc[index, 'Profit Factor'] = 0.0
df.loc[index, 'Payoff Ratio'] = df_winning_trades['LastPnL'].mean() / abs(df_losing_trades['LastPnL'].mean())
return utils.round_df(df, places=2)
def generate_equity_curve(df_trades, starting_balance, historical_data, selling_fees_method=None, start_date=None, end_date=None):
df_trades['StartDate'] = pd.to_datetime(df_trades['StartDate'])
df_trades['EndDate'] = pd.to_datetime(df_trades['EndDate'])
df_trades['LastRecordDate'] = | pd.to_datetime(df_trades['LastRecordDate']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
r"""
Created on Wed Apr 1 21:46:59 2020
@author: <NAME>
This code generates quality metrics for a session given an eid and probe name.
Then computes a label based on:
1) refractory period violations
2) amplitude distribution Cutoff
3) mean amplitudes
And adds these metrics and labels to a csv file to load in phy.
"""
import time
import os
from oneibl.one import ONE
from pathlib import Path
import numpy as np
import alf.io as aio
import matplotlib.pyplot as plt
import brainbox as bb
from phylib.stats import correlograms
import pandas as pd
from defined_metrics import *
def gen_metrics_labels(eid,probe_name):
one = ONE()
ses_path=one.path_from_eid(eid)
alf_probe_dir = os.path.join(ses_path, 'alf', probe_name)
ks_dir = alf_probe_dir
spks_b = aio.load_object(alf_probe_dir, 'spikes')
units_b = bb.processing.get_units_bunch(spks_b)
units = list(units_b.amps.keys())
lengths_samples = [len(v) for k, v in units_b.samples.items()]
units_nonzeros=[i for i,d in enumerate(lengths_samples) if d>0]
    n_units = len(units_nonzeros)  # only compute metrics for units that have at least one spike sample
#for cases where raw data is available locally:
ephys_file_dir = os.path.join(ses_path, 'raw_ephys_data', probe_name)
ephys_file = os.path.join(ses_path, 'raw_ephys_data', probe_name,'_iblrig_ephysData.raw_g0_t0.imec.ap.cbin')
#create params.py file
params_file = os.path.join(ks_dir,'params.py')
if os.path.exists(ephys_file) and not os.path.exists(params_file):
f = open(params_file,"w+")
f.write('dat_path = ' + 'r"' + ephys_file + '"\n' + '''n_channels_dat = 385
dtype = 'int16'
offset = 0
sample_rate = 30000
hp_filtered = False
uidx=0''' )
f.close()
# Initialize metrics
cum_amp_drift = np.full((n_units,), np.nan)
cum_depth_drift = np.full((n_units,), np.nan)
cv_amp = np.full((n_units,), np.nan)
cv_fr = np.full((n_units,), np.nan)
frac_isi_viol = np.full((n_units,), np.nan)
frac_missing_spks = np.full((n_units,), np.nan)
fp_estimate = np.full((n_units,), np.nan)
presence_ratio = np.full((n_units,), np.nan)
pres_ratio_std = np.full((n_units,), np.nan)
ptp_sigma = np.full((n_units,), np.nan)
units_missing_metrics = set()
label=np.empty([len(units)])
RefPViol = np.empty([len(units)])
NoiseCutoff = np.empty([len(units)])
MeanAmpTrue = np.empty([len(units)])
for idx,unit in enumerate(units_nonzeros):
if unit == units_nonzeros[0]:
t0 = time.perf_counter() # used for computation time estimate
print('computing metrics for unit ' + str(unit) + '...' )
#load relevant data for unit
ts = units_b['times'][str(unit)]
amps = units_b['amps'][str(unit)]
samples = units_b['samples'][str(unit)]
depths = units_b['depths'][str(unit)]
RefPViol[idx] = FP_RP(ts)
NoiseCutoff[idx] = noise_cutoff(amps,quartile_length=.25)
#create 'label' based on RPviol,NoiseCutoff, and MeanAmp
        if len(samples) > 50:  # only compute mean amplitude for units with more than 50 samples
try:
MeanAmpTrue[int(unit)] = peak_to_peak_amp(ephys_file, samples, nsamps=20)
if (FP_RP(ts) and noise_cutoff(amps,quartile_length=.25)<20 and MeanAmpTrue[int(unit)]>50) :
label[idx] = 1
else:
label[idx] = 0
except:
if (FP_RP(ts) and noise_cutoff(amps,quartile_length=.25)<20) :
label[idx] = 1
else:
label[idx] = 0
else: #no ephys file, do not include true mean amps
if (FP_RP(ts) and noise_cutoff(amps,quartile_length=.25)<20) :
label[idx] = 1
else:
label[idx] = 0
#now compute additional metrics that label does not depend on:
# Cumulative drift of spike amplitudes, normalized by total number of spikes.
try:
cum_amp_drift[idx] = cum_drift(amps)
except Exception as err:
print("Failed to compute 'cum_drift(amps)' for unit {}. Details: \n {}"
.format(unit, err))
units_missing_metrics.add(unit)
# Cumulative drift of spike depths, normalized by total number of spikes.
try:
cum_depth_drift[idx] = cum_drift(depths)
except Exception as err:
print("Failed to compute 'cum_drift(depths)' for unit {}. Details: \n {}"
.format(unit, err))
units_missing_metrics.add(unit)
# Coefficient of variation of spike amplitudes.
try:
cv_amp[idx] = np.std(amps) / np.mean(amps)
except Exception as err:
print("Failed to compute 'cv_amp' for unit {}. Details: \n {}".format(unit, err))
units_missing_metrics.add(unit)
# Coefficient of variation of computed instantaneous firing rate.
try:
fr = bb.singlecell.firing_rate(ts, hist_win=0.01, fr_win=0.25)
cv_fr[idx] = np.std(fr) / np.mean(fr)
except Exception as err:
print("Failed to compute 'cv_fr' for unit {}. Details: \n {}".format(unit, err))
units_missing_metrics.add(unit)
# Fraction of isi violations.
try:
frac_isi_viol[idx], _, _ = isi_viol(ts, rp=0.002)
except Exception as err:
print("Failed to compute 'frac_isi_viol' for unit {}. Details: \n {}"
.format(unit, err))
units_missing_metrics.add(unit)
# Estimated fraction of missing spikes.
try:
frac_missing_spks[idx], _, _ = feat_cutoff(
amps, spks_per_bin=10, sigma=4, min_num_bins=50)
except Exception as err:
print("Failed to compute 'frac_missing_spks' for unit {}. Details: \n {}"
.format(unit, err))
units_missing_metrics.add(unit)
# Estimated fraction of false positives.
try:
fp_estimate[idx] = fp_est(ts, rp=0.002)
except Exception as err:
print("Failed to compute 'fp_est' for unit {}. Details: \n {}".format(unit, err))
units_missing_metrics.add(unit)
# Presence ratio
try:
presence_ratio[idx], _ = pres_ratio(ts, hist_win=10)
except Exception as err:
print("Failed to compute 'pres_ratio' for unit {}. Details: \n {}".format(unit, err))
units_missing_metrics.add(unit)
# Presence ratio over the standard deviation of spike counts in each bin
try:
pr, pr_bins = pres_ratio(ts, hist_win=10)
pres_ratio_std[idx] = pr / np.std(pr_bins)
except Exception as err:
print("Failed to compute 'pres_ratio_std' for unit {}. Details: \n {}"
.format(unit, err))
units_missing_metrics.add(unit)
#append metrics to the current clusters.metrics
metrics_read = pd.read_csv(Path(alf_probe_dir,'clusters.metrics.csv'))
if not 'label' in metrics_read.columns:
try:
label_df = pd.DataFrame(label)
pd.DataFrame.insert(metrics_read,1,'label',label_df)
except ValueError:
pd.DataFrame.drop(metrics_read,columns = 'label')
pd.DataFrame.insert(metrics_read,1,'label',label_df)
except:
print("Could not save 'label' to .csv.")
try:
df_cum_amp_drift = pd.DataFrame(cum_amp_drift.round(2))
metrics_read['cum_amp_drift'] = df_cum_amp_drift
except Exception as err:
print("Could not save 'cum_amp_drift' to .csv. Details: \n {}".format(err))
try:
df_cum_depth_drift = pd.DataFrame(cum_depth_drift.round(2))
metrics_read['cum_depth_drift'] = df_cum_depth_drift
except Exception as err:
print("Could not save 'cum_depth_drift' to .tsv. Details: \n {}".format(err))
try:
df_cv_amp = pd.DataFrame(cv_amp.round(2))
metrics_read['cv_amp'] = df_cv_amp
except Exception as err:
print("Could not save 'cv_amp' to .tsv. Details: \n {}".format(err))
try:
df_cv_fr = pd.DataFrame(cv_fr.round(2))
metrics_read['cv_fr'] = df_cv_fr
except Exception as err:
print("Could not save 'cv_fr' to .tsv. Details: \n {}".format(err))
try:
df_frac_isi_viol = pd.DataFrame(frac_isi_viol.round(2))
metrics_read['frac_isi_viol'] = df_frac_isi_viol
except Exception as err:
print("Could not save 'frac_isi_viol' to .tsv. Details: \n {}".format(err))
try:
df_frac_missing_spks = pd.DataFrame(frac_missing_spks.round(2))
metrics_read['frac_missing_spks'] = df_frac_missing_spks
except Exception as err:
print("Could not save 'frac_missing_spks' to .tsv. Details: \n {}".format(err))
try:
df_fp_est = pd.DataFrame(fp_estimate.round(2))
metrics_read['fp_est'] = df_fp_est
except Exception as err:
print("Could not save 'fp_est' to .tsv. Details: \n {}".format(err))
try:
df_pres_ratio = pd.DataFrame(presence_ratio.round(2))
metrics_read['pres_ratio'] = df_pres_ratio
except Exception as err:
print("Could not save 'pres_ratio' to .tsv. Details: \n {}".format(err))
try:
df_pres_ratio_std = pd.DataFrame(pres_ratio_std.round(2))
metrics_read['pres_ratio_std'] = df_pres_ratio_std
except Exception as err:
print("Could not save 'pres_ratio_std' to .tsv. Details: \n {}".format(err))
try:
df_refp_viol = pd.DataFrame(RefPViol)
pd.DataFrame.insert(metrics_read,2,'refp_viol',df_refp_viol)
except ValueError:
pd.DataFrame.drop(metrics_read,columns = 'refp_viol')
pd.DataFrame.insert(metrics_read,2,'refp_viol', df_refp_viol)
except Exception as err:
print("Could not save 'RefPViol' to .tsv. Details: \n {}".format(err))
try:
df_noise_cutoff = pd.DataFrame(NoiseCutoff)
pd.DataFrame.insert(metrics_read,3,'noise_cutoff',df_noise_cutoff)
except ValueError:
pd.DataFrame.drop(metrics_read,columns = 'noise_cutoff')
pd.DataFrame.insert(metrics_read,3,'noise_cutoff',df_noise_cutoff)
except Exception as err:
print("Could not save 'NoiseCutoff' to .tsv. Details: \n {}".format(err))
try:
df_mean_amp_true = pd.DataFrame(MeanAmpTrue)
| pd.DataFrame.insert(metrics_read,4,'mean_amp_true',df_mean_amp_true) | pandas.DataFrame.insert |
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
companies = ['AAPL', 'GOOG', 'GOOGL', 'AMZN', 'TSLA', 'MSFT']
DATA_DIR = "./data/regression"
# TODO feature engineering
for ticker in companies:
print(f"regression: {ticker}")
data = pd.read_csv(f"{DATA_DIR}/public-sentiment-{ticker}.csv")
data.dropna(inplace=True)
# drop if num of tweets is too small (< 50)
data = data[data['num_tweets'] > 50]
train_data = data[pd.to_datetime(data['end']) < pd.to_datetime('2018-01-01')]
test_data = data[pd.to_datetime(data['end']) >= | pd.to_datetime('2018-01-01') | pandas.to_datetime |
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
import textwrap
import cupy as cp
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings, strategies as st
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.tests import utils
from cudf.utils.dtypes import cudf_dtypes_to_pandas_dtypes
repr_categories = utils.NUMERIC_TYPES + ["str", "category", "datetime64[ns]"]
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 5, 10])
def test_null_series(nrows, dtype):
size = 5
mask = utils.random_bitmask(size)
data = cudf.Series(np.random.randint(1, 9, size))
column = data.set_mask(mask)
sr = cudf.Series(column).astype(dtype)
if dtype != "category" and np.dtype(dtype).kind in {"u", "i"}:
ps = pd.Series(
sr._column.data_array_view.copy_to_host(),
dtype=cudf_dtypes_to_pandas_dtypes.get(
np.dtype(dtype), np.dtype(dtype)
),
)
ps[sr.isnull().to_pandas()] = pd.NA
else:
ps = sr.to_pandas()
pd.options.display.max_rows = int(nrows)
psrepr = ps.__repr__()
psrepr = psrepr.replace("NaN", "<NA>")
psrepr = psrepr.replace("NaT", "<NA>")
psrepr = psrepr.replace("None", "<NA>")
if (
dtype.startswith("int")
or dtype.startswith("uint")
or dtype.startswith("long")
):
psrepr = psrepr.replace(
str(sr._column.default_na_value()) + "\n", "<NA>\n"
)
if "UInt" in psrepr:
psrepr = psrepr.replace("UInt", "uint")
elif "Int" in psrepr:
psrepr = psrepr.replace("Int", "int")
assert psrepr.split() == sr.__repr__().split()
pd.reset_option("display.max_rows")
dtype_categories = [
"float32",
"float64",
"datetime64[ns]",
"str",
"category",
]
@pytest.mark.parametrize("ncols", [1, 2, 3, 4, 5, 10])
def test_null_dataframe(ncols):
size = 20
gdf = cudf.DataFrame()
for idx, dtype in enumerate(dtype_categories):
mask = utils.random_bitmask(size)
data = cudf.Series(np.random.randint(0, 128, size))
column = data.set_mask(mask)
sr = cudf.Series(column).astype(dtype)
gdf[dtype] = sr
pdf = gdf.to_pandas()
pd.options.display.max_columns = int(ncols)
pdfrepr = pdf.__repr__()
pdfrepr = pdfrepr.replace("NaN", "<NA>")
pdfrepr = pdfrepr.replace("NaT", "<NA>")
pdfrepr = pdfrepr.replace("None", "<NA>")
assert pdfrepr.split() == gdf.__repr__().split()
pd.reset_option("display.max_columns")
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 1, 2, 9, 10, 11, 19, 20, 21])
def test_full_series(nrows, dtype):
size = 20
ps = pd.Series(np.random.randint(0, 100, size)).astype(dtype)
sr = cudf.from_pandas(ps)
pd.options.display.max_rows = int(nrows)
assert ps.__repr__() == sr.__repr__()
pd.reset_option("display.max_rows")
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 1, 2, 9, 20 / 2, 11, 20 - 1, 20, 20 + 1])
@pytest.mark.parametrize("ncols", [0, 1, 2, 9, 20 / 2, 11, 20 - 1, 20, 20 + 1])
def test_full_dataframe_20(dtype, nrows, ncols):
size = 20
pdf = pd.DataFrame(
{idx: np.random.randint(0, 100, size) for idx in range(size)}
).astype(dtype)
gdf = cudf.from_pandas(pdf)
ncols, nrows = gdf._repr_pandas025_formatting(ncols, nrows, dtype)
pd.options.display.max_rows = int(nrows)
pd.options.display.max_columns = int(ncols)
assert pdf.__repr__() == gdf.__repr__()
assert pdf._repr_html_() == gdf._repr_html_()
assert pdf._repr_latex_() == gdf._repr_latex_()
pd.reset_option("display.max_rows")
pd.reset_option("display.max_columns")
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [9, 21 / 2, 11, 21 - 1])
@pytest.mark.parametrize("ncols", [9, 21 / 2, 11, 21 - 1])
def test_full_dataframe_21(dtype, nrows, ncols):
size = 21
pdf = pd.DataFrame(
{idx: np.random.randint(0, 100, size) for idx in range(size)}
).astype(dtype)
gdf = cudf.from_pandas(pdf)
pd.options.display.max_rows = int(nrows)
pd.options.display.max_columns = int(ncols)
assert pdf.__repr__() == gdf.__repr__()
pd.reset_option("display.max_rows")
pd.reset_option("display.max_columns")
@given(
st.lists(
st.integers(-9223372036854775808, 9223372036854775807),
min_size=1,
max_size=10000,
)
)
@settings(deadline=None)
def test_integer_dataframe(x):
gdf = cudf.DataFrame({"x": x})
pdf = gdf.to_pandas()
pd.options.display.max_columns = 1
assert gdf.__repr__() == pdf.__repr__()
assert gdf.T.__repr__() == pdf.T.__repr__()
pd.reset_option("display.max_columns")
@given(
st.lists(
st.integers(-9223372036854775808, 9223372036854775807), max_size=10000
)
)
@settings(deadline=None)
def test_integer_series(x):
sr = cudf.Series(x)
ps = cudf.utils.utils._create_pandas_series(data=x)
assert sr.__repr__() == ps.__repr__()
@given(st.lists(st.floats()))
@settings(deadline=None)
def test_float_dataframe(x):
gdf = cudf.DataFrame({"x": cudf.Series(x, nan_as_null=False)})
pdf = gdf.to_pandas()
assert gdf.__repr__() == pdf.__repr__()
@given(st.lists(st.floats()))
@settings(deadline=None)
def test_float_series(x):
sr = cudf.Series(x, nan_as_null=False)
ps = cudf.utils.utils._create_pandas_series(data=x)
assert sr.__repr__() == ps.__repr__()
@pytest.fixture
def mixed_pdf():
pdf = pd.DataFrame()
pdf["Integer"] = np.array([2345, 11987, 9027, 9027])
pdf["Date"] = np.array(
["18/04/1995", "14/07/1994", "07/06/2006", "16/09/2005"]
)
pdf["Float"] = np.array([9.001, 8.343, 6, 2.781])
pdf["Integer2"] = np.array([2345, 106, 2088, 789277])
pdf["Category"] = np.array(["M", "F", "F", "F"])
pdf["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
pdf["Boolean"] = np.array([True, False, True, False])
return pdf
@pytest.fixture
def mixed_gdf(mixed_pdf):
return cudf.from_pandas(mixed_pdf)
def test_mixed_dataframe(mixed_pdf, mixed_gdf):
assert mixed_gdf.__repr__() == mixed_pdf.__repr__()
def test_mixed_series(mixed_pdf, mixed_gdf):
for col in mixed_gdf.columns:
assert mixed_gdf[col].__repr__() == mixed_pdf[col].__repr__()
def test_MI():
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 4, 10),
"b": np.random.randint(0, 4, 10),
"c": np.random.randint(0, 4, 10),
}
)
levels = [["a", "b", "c", "d"], ["w", "x", "y", "z"], ["m", "n"]]
codes = cudf.DataFrame(
{
"a": [0, 0, 0, 0, 1, 1, 2, 2, 3, 3],
"b": [0, 1, 2, 3, 0, 1, 2, 3, 0, 1],
"c": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
}
)
pd.options.display.max_rows = 999
pd.options.display.max_columns = 0
gdf = gdf.set_index(cudf.MultiIndex(levels=levels, codes=codes))
pdf = gdf.to_pandas()
gdfT = gdf.T
pdfT = pdf.T
assert gdf.__repr__() == pdf.__repr__()
assert gdf.index.__repr__() == pdf.index.__repr__()
assert gdfT.__repr__() == pdfT.__repr__()
pd.reset_option("display.max_rows")
pd.reset_option("display.max_columns")
@pytest.mark.parametrize("nrows", [0, 1, 3, 5, 10])
@pytest.mark.parametrize("ncols", [0, 1, 2, 3])
def test_groupby_MI(nrows, ncols):
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
pdf = gdf.to_pandas()
gdg = gdf.groupby(["a", "b"], sort=True).count()
pdg = pdf.groupby(["a", "b"], sort=True).count()
pd.options.display.max_rows = nrows
pd.options.display.max_columns = ncols
assert gdg.__repr__() == pdg.__repr__()
assert gdg.index.__repr__() == pdg.index.__repr__()
assert gdg.T.__repr__() == pdg.T.__repr__()
pd.reset_option("display.max_rows")
| pd.reset_option("display.max_columns") | pandas.reset_option |
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from __future__ import print_function
import chemplot.descriptors as desc
import chemplot.parameters as parameters
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import umap
import base64
import functools
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from pandas.api.types import is_numeric_dtype
from rdkit.Chem import Draw
from bokeh.plotting import figure
from bokeh.transform import transform, factor_cmap
from bokeh.palettes import Category10, Inferno, Spectral4
from bokeh.models.mappers import LinearColorMapper
from bokeh.models import ColorBar, HoverTool, Panel, Tabs
from bokeh.io import output_file, save, show
from scipy import stats
from io import BytesIO
def calltracker(func):
@functools.wraps(func)
def wrapper(*args):
wrapper.has_been_called = True
return func(*args)
wrapper.has_been_called = False
return wrapper
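# Illustrative usage of calltracker (sketch): the wrapper exposes a has_been_called flag so
# later code can check whether a decorated reduction/plot method was already executed, e.g.
#   @calltracker
#   def reduce(): ...
#   reduce.has_been_called   # False until reduce() has run once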
class Plotter(object):
"""
A class used to plot the ECFP fingerprints of the molecules used to
instantiate it.
:param __sim_type: similarity type structural or tailored
    :param __target_type: target type R (regression) or C (classification)
:param __target: list containing the target values. Is empty if a target does not exist
:param __mols: list of valid molecules that can be plotted
    :param __df_descriptors: dataframe containing the descriptors representation of each molecule
    :param __df_2_components: dataframe containing the two-dimensional representation of each molecule
:param __plot_title: title of the plot reflecting the dimensionality reduction algorithm used
:param __data: list of the scaled descriptors to which the dimensionality reduction algorithm is applied
:param pca_fit: PCA object created when the corresponding algorithm is applied to the data
:param tsne_fit: t-SNE object created when the corresponding algorithm is applied to the data
:param umap_fit: UMAP object created when the corresponding algorithm is applied to the data
:param df_plot_xy: dataframe containing the coordinates that have been plotted
:type __sim_type: string
:type __target_type: string
:type __target: list
:type __mols: rdkit.Chem.rdchem.Mol
:type __df_descriptors: Dataframe
:type __df_2_components: Dataframe
:type __plot_title: string
:type __data: list
:type tsne_fit: sklearn.manifold.TSNE
:type umap_fit: umap.umap_.UMAP
:type df_plot_xy: Dataframe
"""
_static_plots = {'scatter', 'hex', 'kde'}
_interactive_plots = {'scatter', 'hex'}
_sim_types = {'tailored', 'structural'}
_target_types = {'R', 'C'}
def __init__(self, encoding_list, target, target_type, sim_type, get_desc, get_fingerprints):
        # Error handling sim_type
if sim_type not in self._sim_types:
if len(target) > 0:
self.__sim_type = 'tailored'
print('sim_type indicates the similarity type by which the plots are constructed.\n' +
'The supported similarity types are structural and tailored.\n' +
                  'Because a target list has been provided, \'tailored\' has been selected as sim_type.')
else:
self.__sim_type = 'structural'
print('sim_type indicates the similarity type by which the plots are constructed.\n' +
'The supported similarity types are structural and tailored.\n' +
                  'Because no target list has been provided, \'structural\' has been selected as sim_type.')
else:
self.__sim_type = sim_type
if self.__sim_type != "structural" and len(target) == 0:
raise Exception("Target values missing")
        # Error handling target_type
if len(target) > 0:
if len(target) != len(encoding_list):
raise Exception("If target is provided its length must match the instances of molecules")
if len(target) > 0:
df_target = pd.DataFrame(data=target)
unique_targets_ratio = 1.*df_target.iloc[:, 0].nunique()/df_target.iloc[:, 0].count() < 0.05
numeric_target = | is_numeric_dtype(df_target.dtypes[0]) | pandas.api.types.is_numeric_dtype |
import sys
import pandas as pd
import sqlalchemy
from langdetect import DetectorFactory, detect
def load_data(messages_filepath, categories_filepath):
"""
Reads up the messages and categories into a Pandas DataFrame
Parameters
messages_filepath(str): file path of messages data file
categories_filepath(str):file path of categories data file
Returns
Pandas DataFrame
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
class DescTC():
"""
    It generates a range of information about the provided data and gives a better visualization of the data before the cleansing process has been done.
    Therefore, it helps you decide on the best data-cleansing method to use as your analysis starting point.
"""
def __init__(self, df):
self.df = df
def table(self):
"""Analyses ``DataFrame`` column sets of mixed data types. The output
        will vary depending on the column types. Refer to the notes
below for more detail.
Returns
-------
DataFrame
Summary information and some of the descriptive statistics of the data provided.
        ``Type``: is the data type of the column values,
``Quant.Zeros``: is the quantity of the zero values of the column,
``Quant.NaNs``: is the quantity of the NaN values of the column,
``%NaNs``: is the percentage of the NaN values of the column,
``Quant.Uniques``: is the quantity of unique values of the column,
        ``Quant.Outliers``: is the quantity of outlier values of the column (z-score method),
        ``Min/Lowest``: is the minimum value of the numeric column or the least common value of the categorical column,
``Mean``: is the average/mean of the values of the numeric column,
``Median``: is the 50th percentile of the numeric column,
        ``Mode``: is the most frequent value of the column,
``Max/Highest``: is the maximum value of the numeric column or the most common value of the categorical column.
        The z-score method is used to find outliers. It can only be used with an approximately
        gaussian distribution, so disregard the outlier counts if the distribution
        of the variable cannot be assumed to be parametric.
If multiple object values have the lowest/highest count,
then the results will be arbitrarily chosen from
among those with the lowest/highest count.
Notes
-----
All empty columns of your data frame will be excluded and not generated in the output.
        The data must be converted to a pandas DataFrame with column names. Make the first row the column headers.
"""
import numpy as np
import pandas as pd
empty_cols = [col for col in self.df.columns if self.df[col].isnull().all()]
self.df.drop(empty_cols, axis=1, inplace=True)
np.seterr(divide='ignore', invalid='ignore')
col_list = []
zr_list = []
unique_list = []
miss_table = []
outlier = []
mean_list = []
median_list = []
mode_list = []
max_list = []
min_list = []
O_col_name = []
O_col_pos = []
for colname in self.df:
zeros = len(self.df) - np.count_nonzero(self.df[colname].tolist())
zr_list.append(zeros)
unique = self.df[colname].unique()
unique_list.append(unique)
mode = self.df[colname].mode()[0]
mode_list.append(mode)
col_list.append(colname)
# Replacing question marks with np.nan
self.df[colname] = self.df[colname].apply(lambda x: np.nan if str(x).find('?') > -1 else x)
types = self.df.dtypes.tolist()
nan = self.df.isna().sum().tolist()
nanp = round((self.df.isna().sum() * 100 / len(self.df)), 2).tolist()
# Creating a new dataFrame without the non-numeric columns and finding their names and positions to create a new df.
for col in self.df.columns:
if self.df.dtypes[col] == "O" or self.df.dtypes[col] == "bool":
O_col_name.append(self.df[col].name)
O_col_pos.append(self.df.columns.get_loc(self.df[col].name))
df_new = self.df.drop(self.df.columns[O_col_pos], axis=1)
# Creating another DataFrame (df_pad) with the Zscore without the non-numeric columns
from scipy import stats
from scipy.stats import zscore
df_pad = stats.zscore(df_new)
df_pad = pd.DataFrame(df_pad, columns=df_new.columns.tolist())
# Creating a dataframe column with number 1 to replace object columns
c_ones = np.ones(len(self.df))
# Inserting the non-numeric columns on the df_pad (dataframe Zscore)
for i in range(len(O_col_pos)):
df_pad.insert(loc=O_col_pos[i], column=O_col_name[i], value=c_ones)
# Finding quantity of outliers for each column
for c in range(len(df_pad.columns)):
out_col = []
count = 0
for j in df_pad.values[:, c]:
if np.abs(j) > 3:
count += 1
out_col.append(count)
outlier.append(out_col)
# Finding Max, Min, Average and median for each column
for c in self.df.columns:
if c not in O_col_name:
max_list.append(self.df[c].max())
min_list.append(self.df[c].min())
mean_list.append(self.df[c].mean())
median_list.append(self.df[c].median())
else:
max_list.append(self.df[c].value_counts().index[:1][0])
min_list.append(self.df[c].value_counts().index[:len(self.df[c].unique())][-1])
mean_list.append("NaN")
median_list.append("NaN")
# Final result
for i in range(len(self.df.columns)):
df_missing = [types[i], zr_list[i], nan[i], nanp[i], len(set(unique_list[i])), outlier[i], min_list[i],
mean_list[i], median_list[i], mode_list[i], max_list[i]]
miss_table.append(df_missing)
missing_table = pd.DataFrame(miss_table, index=col_list,
columns=["Type", "Quant.Zeros", "Quant.NaNs", "%NaNs", "Quant.Uniques",
"Quant.Outliers", "Min/Lowest", "Mean", "Median", "Mode", "Max/Highest"])
return missing_table
def chart(self):
"""
        This method condenses large amounts of information into easy-to-understand charts
        that clearly and effectively communicate important points.
Returns
-------
        A count plot for each qualitative variable,
        a histogram and a box plot for each quantitative variable,
        and a correlation heatmap for the quantitative variables.
Note:
The object data type can actually contain multiple different types. For instance, the column
could include integers, floats, and strings which collectively are labeled as an object.
Therefore, you may not get the box plot plotted from an object dtype variable.
"""
import matplotlib.pyplot as plt
import seaborn as sns
empty_cols = [col for col in self.df.columns if self.df[col].isnull().all()]
self.df.drop(empty_cols, axis=1, inplace=True)
plt.rcParams.update({'figure.max_open_warning': 0})
# Distribution Chart for each dataframe column
for col in self.df.columns:
print('Variable: \033[34m{}\033[m'.format(col))
if self.df.dtypes[col] == "O" or self.df.dtypes[col] == "bool":
if len(self.df[col].unique()) > 50:
print(
'Since the variable \033[32m{}\033[m has several distinct values, the visualization process through the chart is not a good option.'.format(
col))
print()
print()
else:
plt.figure(figsize=(9, 4))
chart = sns.countplot(x=col, data=self.df, order=self.df[col].value_counts().index)
chart.set(title="Frequency Chart - {}".format(col))
plt.xticks(rotation=45, horizontalalignment='right')
plt.grid(color='gray', ls='-.', lw=0.08)
plt.show()
print()
print()
else:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
ax1.hist(self.df[col], label=col, bins='sqrt')
ax1.set(title="Frequency Chart - {}".format(col), xlabel=col, ylabel="count")
ax1.grid(color='gray', ls='-.', lw=0.08)
plt.setp(ax1.get_xticklabels(), rotation=15)
red_diamond = dict(markerfacecolor='r', marker='D')
ax2.boxplot(self.df[col], flierprops=red_diamond)
ax2.set(title="Box-plot - {}".format(col), xlabel=col, ylabel="values")
ax2.grid(color='gray', ls='-.', lw=0.08)
plt.show()
print()
print()
# Correlation Plot
if self.df.shape[1] <= 10:
plt.figure(figsize=(5, 5))
elif self.df.shape[1] <= 20:
plt.figure(figsize=(8, 8))
else:
plt.figure(figsize=(20, 20))
sns.heatmap(self.df.corr(), annot=True, cmap='RdBu')
plt.title('Correlation Plot - Heatmap', fontsize=14)
plt.xticks(rotation=15)
plt.yticks(rotation=15)
def printfullTable(self):
"""
        Useful to see the entire output regardless of the environment in which you are executing the package.
"""
import pandas as pd
        pd.set_option('display.max_rows', None)
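    # --- Hedged aside (not part of the original class; the method body above is
    # cut off at this point). A minimal standalone sketch of the display handling
    # such a helper usually wraps: lift pandas' row/column/width limits only
    # while printing, instead of changing the options globally.
    @staticmethod
    def _print_full_sketch(frame):
        import pandas as pd
        with pd.option_context('display.max_rows', None,
                               'display.max_columns', None,
                               'display.width', None):
            print(frame)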
import os
import pandas as pd
import geopandas as gpd
import plotly.graph_objs as go
def graph(df, clusters_list, branch, grid_resume_opt, pop_thresh,
full_ele='no', substations=False):
'''
Create graph for the Grid Routing Tab and for the NPC analysis tab
:param df:
:param clusters_list:
:param branch:
:param grid_resume_opt:
    :param substations: GeoDataFrame with substation points. If substations are
        not yet available, this argument is left at its default of False
:param pop_thresh:
:param full_ele:
:return:
'''
print('Plotting results..')
# todo -> study area must be created at the beginning
if os.path.isfile(r'Input/study_area.shp'):
study_area = gpd.read_file(r'Input/study_area.shp')
study_area = study_area.to_crs(epsg=4326)
area = study_area.boundary.unary_union.xy
else:
        study_area = pd.DataFrame()
# import bibtexparser
from fixtex import fix_bib
import utool as ut
import numpy as np
import pandas as pd
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
# PARSE DATABASE
# full_bibman = fix_bib.BibMan('FULL.bib', doc='thesis')
bibman = fix_bib.BibMan('final-bib.bib', doc='thesis')
bibman.sort_entries()
bibman.write_testfile()
bibman.printdiff()
bibman.save()
print('bibman.unregistered_pubs = {}'.format(ut.repr4(bibman.unregistered_pubs)))
for pub in bibman.unregistered_pubs:
if 'None' in str(pub):
print(ut.repr4(pub.entry))
df = pd.DataFrame.from_dict(bibman.cleaned, orient='index')
del df['abstract']
# want = text.count('@')
want = len(df)
# paged_items = df[~pd.isnull(df['pub_abbrev'])]
# has_pages = ~pd.isnull(paged_items['pages'])
# print('have pages {} / {}'.format(has_pages.sum(), len(has_pages)))
# print(ut.repr4(paged_items[~has_pages]['title'].values.tolist()))
df.loc[pd.isnull(df['pub_type']), 'pub_type'] = 'None'
entrytypes = dict(list(df.groupby('pub_type')))
n_grouped = sum(map(len, entrytypes.values()))
assert n_grouped == want
pub_types = {
'journal': None,
'conference': None,
'incollection': None,
'online': None,
'thesis': None,
'report': None,
'book': None,
}
for unknown in set(entrytypes.keys()).difference(set(pub_types)):
print('unknown = {!r}'.format(unknown))
g = entrytypes[unknown]
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
print('g = {!r}'.format(g))
ignore = {
'conference': ['eventtitle', 'doi', 'urldate', 'location', 'volume'],
'journal': ['doi', 'urldate', 'issue', 'number', 'volume'],
'book': ['urldate'],
'thesis': ['urldate'],
'online': ['type'],
'report': ['urldate'],
}
for v in ignore.values():
v.append('eprinttype')
v.append('eprint')
print('Entry type freq:')
print(ut.map_vals(len, entrytypes))
for e, g in entrytypes.items():
print('\n --- TYPE = %r' % (e.upper(),))
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
missing_cols = g.columns[np.any(pd.isnull(g), axis=0)]
if e in ignore:
missing_cols = missing_cols.difference(ignore[e])
print('missing_cols = {!r}'.format(missing_cols.tolist()))
for col in missing_cols:
print('col = {!r}'.format(col))
print(g[pd.isnull(g[col])].index.tolist())
for e, g in entrytypes.items():
print('e = %r' % (e,))
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
if 'pub_full' in g.columns:
place_title = g['pub_full'].tolist()
print(ut.repr4(ut.dict_hist(place_title)))
else:
print(g)
print('Unknown publications')
if 'report' in entrytypes:
g = entrytypes['report']
missing = g[pd.isnull(g['title'])]
if len(missing):
print('Missing Title')
print(ut.repr4(missing[['title', 'author']].values.tolist()))
if 'journal' in entrytypes:
g = entrytypes['journal']
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
missing = g[pd.isnull(g['journal'])]
if len(missing):
print('Missing Journal')
print(ut.repr4(missing[['title', 'author']].values.tolist()))
if 'conference' in entrytypes:
g = entrytypes['conference']
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
missing = g[pd.isnull(g['booktitle'])]
if len(missing):
print('Missing Booktitle')
print(ut.repr4(missing[['title', 'author']].values.tolist()))
if 'incollection' in entrytypes:
g = entrytypes['incollection']
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
missing = g[pd.isnull(g['booktitle'])]
if len(missing):
print('Missing Booktitle')
print(ut.repr4(missing[['title', 'author']].values.tolist()))
if 'thesis' in entrytypes:
g = entrytypes['thesis']
g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]
    missing = g[pd.isnull(g['institution'])]
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from scipy.integrate import solve_ivp
import numpy as np
from tqdm import tqdm
from scipy.optimize import curve_fit
from functools import partial
import multiprocessing as mp
import pickle  # used by SIR_fitter.fit(save_to=...)
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import MultiTaskLassoCV , Lasso
from sklearn.multioutput import MultiOutputRegressor
import xgboost as xgb
def mae(pred, true):
return np.mean(np.abs(pred - true))
class SIR_fitter():
    ''' Class that uses the features to extract SIR parameters. Fitted parameters will be keyed by country and date.
    infection_days: days previous to day0 over which to sum the number of currently infected.
semi_fit_days: number of days before and after the actual day to fit the SIR parameters on.
beta_i: initial value for fitting beta.
gamma_i: initial value for fitting gamma.
'''
def __init__(self, moving_average=False,
infection_days=7, semi_fit_days=7,
beta_i=0.6, gamma_i=1/7,nprocs=4):
self.infection_days=infection_days
self.semi_fit=semi_fit_days
self.fit_days=semi_fit_days*2+1
self.time_integ=np.linspace(-self.semi_fit,self.semi_fit,self.fit_days)
#self.time_integ=np.linspace(0,1,2) # questa
self.beta_i=beta_i
self.gamma_i=gamma_i
self.nprocs=nprocs
self.moving_average=moving_average
self.tempone=np.linspace(0,10000,10001)
def fit_country(self,df_country):
COL = ['NewCases'] if not self.moving_average else ['MA']
gdf = df_country
gdf['beta']=np.nan
gdf['gamma']=np.nan
gdf['predicted_cases']=gdf[COL].copy()
all_case_data = np.array(gdf[COL])
target_cases = all_case_data.copy()
nb_total_days = len(gdf)
for d,(idx,row) in enumerate(gdf.iterrows()):
if d>self.semi_fit and d<(nb_total_days-self.semi_fit):
N=row.Population
# X_cases = all_case_data[d - self.semi_fit:d+self.semi_fit+1].reshape(-1)
X_true_cases = target_cases[d - self.semi_fit:d+self.semi_fit+1].reshape(-1)
I0=all_case_data[d-self.semi_fit- self.infection_days:
d-self.semi_fit+1].sum()
Ic0=all_case_data[0:(d-self.semi_fit)+1].sum()
R0=Ic0-I0
S0=N-I0-R0
x0=(S0,I0,Ic0,R0)
time_integ=self.tempone[d-self.semi_fit:d+self.semi_fit+1].reshape(-1)
if I0<0:
raise ValueError('Infected was {} for population {}'.format(x0[1],N))
elif I0<1:
popt=np.array([np.nan,np.nan])
else:
fintegranda=partial(self.__SIR_integrate,time_integ,x0,N)
popt, pcov = curve_fit(fintegranda, time_integ,
X_true_cases[1:],
p0=[self.beta_i,self.gamma_i],maxfev=5000,bounds=([0.,0.],
[np.inf,1.]))
gdf.loc[idx,'beta']=popt[0]
gdf.loc[idx,'gamma']=popt[1]
if d < nb_total_days-self.semi_fit-1:
fut_integ=self.tempone[d-self.semi_fit:d+self.semi_fit+2]
all_case_data[d+self.semi_fit+1]=fintegranda(fut_integ,
popt[0],popt[1])[-1]
gdf.loc[idx,'predicted_cases']=all_case_data[d+self.semi_fit]
return gdf
def __SIR_ode(self,t,x0, N, beta, gamma):
S, I, Ic, R = x0
dS = -beta * S * I / N
dI = beta * S * I / N - gamma * I
# Computes the cumulative new cases during the integration period
dIc= -dS
dR = gamma * I
return dS, dI, dIc,dR
def __SIR_integrate(self,ttotp,x0,N,ti,beta,gamma):
''' Argument ti not used but needed by curve_fit '''
sol=solve_ivp(self.__SIR_ode,[ttotp[0],ttotp[-1]],x0,args=(N,beta,gamma),t_eval=ttotp)
#lung=len(sol.y[0])
        # The only variable to predict is "NewCases", i.e. the difference of the cumulative Ic
return np.diff(sol.y[2])#.reshape(lung,1).flatten()
def row_predict(self,GeoID,date,pars=None):
'''
Given the SIR parameters, predicts the new cases. The last one is the actual prediction for the final MAE
'''
row=self.df.loc[(self.df.GeoID==GeoID)&(self.df.Date==pd.to_datetime(date)),:].iloc[0,:]
N,x0=self.row_initial_conditions(row)
if np.isnan(pars[0]):
return np.repeat(row.CasesDay0, self.fit_days)[1:]
if pars is not None:
beta=pars[0]
gamma=pars[1]
else:
beta=self.df_pars.loc[(self.df_pars.GeoID==GeoID) &
(self.df_pars.Date==pd.to_datetime(date)),'beta'].iloc[0]
gamma=self.df_pars.loc[(self.df_pars.GeoID==GeoID) &
(self.df_pars.Date==pd.to_datetime(date)),'gamma'].iloc[0]
Ipred=self.__SIR_integrate(self.time_integ,x0,N,self.time_integ,beta,gamma)
return Ipred
def fit(self,df,save_to=None):
'''
Fit SIR parameters on all the data.
save_to: path to save the results in pickle format. Results are saved a Pandas DataFrame having columns: GeoID,Date,beta,gamma
'''
if self.semi_fit<3:
raise ValueError('ValueError: semi_fit_days should be higher than 2')
geo_ids = df.GeoID.unique()
COL = ['NewCases'] if not self.moving_average else ['MA']
df=df[['GeoID','Date','Population']+COL]
self.df_chunks=[df[df.GeoID==g].sort_values('Date')
for g in geo_ids]
nchunks=len(self.df_chunks)
pool=mp.Pool(self.nprocs)
outputs=list(tqdm(pool.imap(self.fit_country,self.df_chunks),total=nchunks))
pool.close()
pool.join()
self.df_pars=pd.concat(outputs)
self.df_pars.sort_values(['GeoID','Date'],inplace=True)
if save_to is not None:
with open(save_to,'wb') as f:
pickle.dump(self.df_pars,f)
# Return the classifier
return self
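# --- Hedged illustration (not part of the original API) ----------------------
# What the private __SIR_integrate helper above computes, shown standalone: the
# SIR system is integrated over the fit window and the daily new cases are the
# first difference of the cumulative curve Ic. All parameter values below are
# assumed toys, not values taken from the project.
def _demo_sir_new_cases():
    def sir_ode(t, x, N, beta, gamma):
        S, I, Ic, R = x
        dS = -beta * S * I / N
        dI = beta * S * I / N - gamma * I
        return dS, dI, -dS, gamma * I  # dIc equals the inflow -dS

    N, beta, gamma = 1e6, 0.6, 1.0 / 7.0        # assumed toy parameters
    x0 = (N - 100.0, 100.0, 100.0, 0.0)         # S0, I0, Ic0, R0
    t = np.arange(0, 15, dtype=float)
    sol = solve_ivp(sir_ode, (t[0], t[-1]), x0, args=(N, beta, gamma), t_eval=t)
    return np.diff(sol.y[2])                    # daily new cases over the window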
class SIR_predictor(BaseEstimator, RegressorMixin, SIR_fitter):
def __init__(self, df=None,moving_average=True,
infection_days=7, semi_fit=3,
beta_i=0.6, gamma_i=1/7,lookback_days=30,
MLmodel= 'MultiOutputRegressor(xgb.XGBRegressor())',
paral_predict=False,pre_computed=None,nprocs=4):
self.df=df
self.moving_average=moving_average
self.infection_days=infection_days
self.semi_fit=semi_fit
self.beta_i=beta_i
self.gamma_i=gamma_i
self.lookback_days=lookback_days
self.nprocs=nprocs
self.paral_predict=paral_predict
self.MLmodel=MLmodel
self.pre_computed=pre_computed
def SIR_ode(self,t,x0, N, beta, gamma):
return self._SIR_fitter__SIR_ode(t,x0, N, beta, gamma)
def SIR_integrate(self,ttotp,x0,N,ti,beta,gamma):
return self._SIR_fitter__SIR_integrate(ttotp,x0,N,ti,beta,gamma)
def fit(self,X,y):
check_X_y(X,y)
if self.pre_computed is None:
if 'MA' in self.df.columns:
self.df.loc[self.df.MA<0,'MA']=0.
if 'NewCases' in self.df.columns:
self.df.loc[self.df.NewCases<0,'NewCases']=0.
self.df.loc[self.df.ConfirmedCases<0,'ConfirmedCases']=0.
self.SFmodel=SIR_fitter(self.moving_average,
self.infection_days, self.semi_fit,
self.beta_i, self.gamma_i,self.nprocs)
print('Fitting SIR parameters...')
self.SFmodel.fit(self.df)
else:
print('Already computed SIR parameters')
self.SFmodel=SIR_fitter(self.moving_average,
self.infection_days, self.semi_fit,
self.beta_i, self.gamma_i,self.nprocs)
self.SFmodel.df_pars=self.pre_computed
#self.SFmodel.df_pars=self.df.copy()
#self.SFmodel.df_pars['beta']=0.6
#self.SFmodel.df_pars['gamma']=1/7
        # pandas raises MergeError if both `on` and `left_index` are passed, so merge on the keys only
        self.df = self.df.merge(self.SFmodel.df_pars[['GeoID', 'Date', 'beta', 'gamma']],
                                how='left', on=['GeoID', 'Date']).dropna(subset=['beta', 'gamma'])
#print(self.df.loc[X[:,-1]])
#Predict SIR parameters instead of cases
#Use only row in training set AND with SIR pars not being nans
idx=np.array(list(set(X[:,-1]).intersection(set(self.df.index))),
dtype=int).reshape(-1)
#print(len(idx),idx[:10])
self.y_pars=np.array(self.df.loc[idx, ['beta','gamma']])
#print(self.y_pars.shape)
self.X_pars=X[np.in1d(X[:,-1],idx)]
#print(self.X_pars.shape)
# remove last column (df.index)
# remove first lookback_days columns: not using cases to predict parameters
self.X_pars=self.X_pars[:,self.lookback_days+1:-1]
#print('ML training:',self.X_pars.shape)
#exit()
#self.X_pars=self.X_pars[:,:-1]
self.MLmodel=eval(str(self.MLmodel))
#print('\n ML model: ',type(self.MLmodel))
self.MLmodel.fit(self.X_pars,self.y_pars)
self.TMAE=mae(self.MLmodel.predict(self.X_pars),self.y_pars)
print('Training MAE on SIR params:', self.TMAE)
return self
def predict_pars(self,X):
return self.MLmodel.predict(X[:,self.lookback_days+1:-1])
#except Exception as e:
# print(e)
# print(X[:,self.lookback_days+1],X[:,-1])
# print(X[:,self.lookback_days+1:-1].shape)
# exit()
#return self.MLmodel.predict(X[:,:-1])
def predict_chunk(self,X_chunk):
y_chunk=[]
for i in range(X_chunk.shape[0]):
pars=self.predict_pars(X_chunk[i,:].reshape(1,-1))
beta=pars[0][0]
gamma=pars[0][1]
time_integ=[0,1]
N=X_chunk[i,self.lookback_days+1]
#I0=X_chunk[i,self.lookback_days-1].sum()
#Ic0=X_chunk[i,self.lookback_days]
#R0=Ic0-I0
I0=X_chunk[i,self.lookback_days-self.infection_days:self.lookback_days].sum()
Ic0=X_chunk[i,self.lookback_days]
R0=Ic0-I0
S0=N-I0-R0
x0=(S0,I0,Ic0,R0)
#print(x0)
#I=solve_ivp(self.SIR_ode,[time_integ[0],time_integ[-1]],
# x0,args=(N,beta,gamma),t_eval=time_integ).y[1][1]
I=np.diff(solve_ivp(self.SIR_ode,[time_integ[0],time_integ[-1]],
x0,args=(N,beta,gamma),t_eval=time_integ).y[2])[-1]
y_chunk.append(I)
return y_chunk
def predict(self,X):
if self.paral_predict:
nchunks=self.nprocs*10
X_chunks=np.array_split(X,nchunks)
pool=mp.Pool(self.nprocs)
y_chunks=list(pool.imap(self.predict_chunk,X_chunks))
pool.close()
pool.join()
y=[item for sublist in y_chunks for item in sublist]
return y
else:
y=self.predict_chunk(X)
return y
def score(self,X_test,y_test):
#check_X_y(X_test,y_test)
#y_pred=self.predict(X_test)
# Take negative to mean "score"
#cases_mae=mae(y_pred,y_test)
#print('Training MAE on cases:',cases_mae)
#return -cases_mae
return -self.TMAE
#Questa non ha il fit, non è usabile
class SIRRegressor(BaseEstimator, RegressorMixin):
    ''' Model that uses the features to extract SIR parameters and compute predictions.
single_pred_days: number of next days to predict. Predictions could be of more than one day, so to have a multi-variate regression (NOT TESTED YET)'''
def __init__(self, single_pred_days=1,lookback_days=30,infection_days=15):
#self.demo_param = demo_param
self.params=None
self.single_pred_days=single_pred_days
self.lookback_days=lookback_days
self.infection_days=infection_days
def __SIR_ode(self,t,x0, N, beta, gamma):
S, I, Ic, R = x0
dS = -beta * S * I / N
dI = beta * S * I / N - gamma * I
# Computes the cumulative new cases during the integration period
dIc= -dS
dR = gamma * I
return dS, dI, dIc,dR
def __SIR_integrate(self,ttotp,x0,N,beta,gamma):
sol=solve_ivp(self.__SIR_ode,[ttotp[0],ttotp[-1]],x0,args=(N,beta,gamma),t_eval=ttotp)
#lung=len(sol.y[0])
# The only variable to predict is "NewCases", i.d. the difference of the cumulative Ic
return np.diff(sol.y[2],prepend=x0[2])#.reshape(lung,1).flatten()
def fit(self, X, y):
# Check that X and y have correct shape
X, y = check_X_y(X, y)
self.X_ = X
self.y_ = y
#Assign random parameters, to be used for scalar product with the features.
# We sample two rows: one to get beta and one to get gamma
self.params=np.random.uniform(0,1,(2,X.shape[1]))
# Return the classifier
return self
def predict(self, X):
# Check is fit had been called
check_is_fitted(self)
# Input validation
X = check_array(X)
# Normalization is to have small values also with random parameters (to be removed later maybe)
X_normed = (X - X.min(0)) / X.ptp(0)
X_normed=np.nan_to_num(X_normed)
y_pred=[]
time_integ=np.linspace(0,self.single_pred_days,self.single_pred_days+1)
for i in tqdm(range(X.shape[0]),total=X.shape[0]):
# Apply ML model parameters to get SIR parameters
# Division for X.shape[1] should ensure values lower than 1, but should be removed when there will be a real training
beta=self.params[0].dot(X_normed[i])/X.shape[1]
gamma=self.params[1].dot(X_normed[i])/X.shape[1]
# Total population
N=X[i,self.lookback_days+1]
# Currently infected individuals
I0=X[i,self.lookback_days-self.infection_days:self.lookback_days-1].sum()
# Recovered individuals (taken as current total confirmed cases)
R0=X[i,self.lookback_days]
# Susceptible individuals
S0=N-I0-R0
# Initial condition of integration
x0=(S0,I0,R0,R0)
Ipred=self.__SIR_integrate(time_integ,x0,N,beta,gamma)[-1]
y_pred.append(Ipred)
return np.array(y_pred)
class SIR_parfinder():
    ''' DO NOT USE: Class that uses the features to extract SIR parameters. Fitted parameters will be keyed by country and date.
single_pred_days: number of next days to predict. Predictions could be of more than one day, so to have a multi-variate regression (NOT TESTED YET)
lookback_days: past days to use. Must be higher than fit_days+infected_days.
infected_days: days previous to day0 to sum the number of currently infected.
fit_days: number of days before the last day to fit the SIR parameters on (only for labelization).
beta_i: initial value for fitting beta (only for labelization).
gamma_i: initial value for fitting gamma (only for labelization).
'''
def __init__(self, df,moving_average=False,
infection_days=7, semi_fit_days=7,
beta_i=0.6, gamma_i=1/7,nprocs=4):
self.infection_days=infection_days
self.semi_fit=semi_fit_days
self.fit_days=semi_fit_days*2+1
self.time_integ=np.linspace(-self.semi_fit,self.semi_fit,self.fit_days)
self.beta_i=beta_i
self.gamma_i=gamma_i
self.nprocs=nprocs
self.df=self.__add_lookback(df,moving_average)
def __add_lookback(self,df,moving_average):
COL = ['NewCases'] if not moving_average else ['MA']
X_cols = df.columns
y_col = COL
geo_ids = df.GeoID.unique()
fit_ids=['CasesDay{}'.format(d) for d in range(-self.semi_fit,self.semi_fit+1)]
self.fit_ids=fit_ids
#print(fit_ids)
df=df[['GeoID','Date','Population']+COL]
df=pd.concat([df,pd.DataFrame(columns=fit_ids+['I0','R0'])])
print('Adding lookback days to the dataframe...')
for g in tqdm(geo_ids):
gdf = df[df.GeoID == g]
all_case_data = np.array(gdf[COL])
nb_total_days = len(gdf)
for d,(idx,row) in enumerate(gdf.iterrows()):
if d>self.semi_fit and d<(nb_total_days-self.semi_fit):
X_cases = all_case_data[d - self.semi_fit:d+self.semi_fit+1].reshape(-1)
try:
df.loc[idx,fit_ids]=X_cases
df.loc[idx,'I0']=all_case_data[d-self.semi_fit- self.infection_days:
d-self.semi_fit+1].sum()
df.loc[idx,'R0']=all_case_data[0:
(d-self.semi_fit-
self.infection_days)].sum()
except ValueError:
print(row.GeoID)
print(row.Date)
print(df.loc[idx-self.semi_fit,fit_ids].shape)
print(X_cases.shape)
raise ValueError('Mismatch in shapes for this entry, check the code...')
return df.dropna()
def __SIR_ode(self,t,x0, N, beta, gamma):
S, I, Ic, R = x0
dS = -beta * S * I / N
dI = beta * S * I / N - gamma * I
# Computes the cumulative new cases during the integration period
dIc= -dS
dR = gamma * I
return dS, dI, dIc,dR
def __SIR_integrate(self,ttotp,x0,N,ti,beta,gamma):
''' Argument ti not used but needed by curve_fit '''
sol=solve_ivp(self.__SIR_ode,[ttotp[0],ttotp[-1]],x0,args=(N,beta,gamma),t_eval=ttotp)
#lung=len(sol.y[0])
# The only variable to predict is "NewCases", i.d. the difference of the cumulative Ic
return np.diff(sol.y[2])#.reshape(lung,1).flatten()
def labelize_chunk(self,df_chunk):
#df_chunk=self.df_chunks[i]
pars_df=df_chunk[['GeoID','Date']].copy()
pars_df['beta']=np.nan
pars_df['gamma']=np.nan
for j,(idx,row) in enumerate(df_chunk.iterrows()):
pars_df.iloc[j,:]=[row.GeoID,row.Date]+list(self.row_fit(row))
return pars_df
def row_initial_conditions(self,row):
'''
Returns the initial condition from the the SIR integration of this row.
'''
N=row.Population
# Currently infected individuals (sum of new cases on the previous infection_days before the first fit day)
I0=row.I0
# Recovered individuals (taken as current total confirmed cases)
R0=row.R0
# Susceptible individuals
S0=N-I0-R0
# Initial condition of integration
x0=(S0,I0,R0,R0)
return N,x0
def row_observed_cases(self,row):
'''
Return an array with length fit_days with the real observed new_cases of that row.
'''
return row[self.fit_ids[1:]].values
def row_fit(self,row):
'''
Fit SIR parameters for one observation
'''
N,x0=self.row_initial_conditions(row)
if x0[1]<0:
            raise ValueError('Infected was {} for population {}'.format(x0[1], N))
elif x0[1]<1:
popt=np.array([np.nan,np.nan])
else:
fintegranda=partial(self.__SIR_integrate,self.time_integ,x0,N)
popt, pcov = curve_fit(fintegranda, self.time_integ,
self.row_observed_cases(row),
p0=[self.beta_i,self.gamma_i],maxfev=5000,bounds=([0.,0.],
[np.inf,1.]))
return popt.reshape(-1)
def row_predict(self,GeoID,date,pars=None):
'''
Given the SIR parameters, predicts the new cases. The last one is the actual prediction for the final MAE
'''
row=self.df.loc[(self.df.GeoID==GeoID)&(self.df.Date==pd.to_datetime(date)),:].iloc[0,:]
N,x0=self.row_initial_conditions(row)
if np.isnan(pars[0]):
return np.repeat(row.CasesDay0, self.fit_days)[1:]
if pars is not None:
beta=pars[0]
gamma=pars[1]
else:
beta=self.df_pars.loc[(self.df_pars.GeoID==GeoID) &
(self.df_pars.Date==pd.to_datetime(date)),'beta'].iloc[0]
gamma=self.df_pars.loc[(self.df_pars.GeoID==GeoID) &
(self.df_pars.Date==pd.to_datetime(date)),'gamma'].iloc[0]
Ipred=self.__SIR_integrate(self.time_integ,x0,N,self.time_integ,beta,gamma)
return Ipred
def fit(self,save_to=None):
'''
Fit SIR parameters on all the data.
save_to: path to save the results in pickle format. Results are saved a Pandas DataFrame having columns: GeoID,Date,beta,gamma
'''
if self.semi_fit<3:
raise ValueError('ValueError: semi_fit_days should be higher than 2')
nchunks=self.nprocs*10
self.df_chunks = np.array_split(self.df,nchunks)
#print(type(self.df_chunks))
        empty_chunks = [df for df in self.df_chunks if df.shape[0] == 0]
        if len(empty_chunks):
            print('{} empty chunks'.format(len(empty_chunks)))
#nchunks=len(self.X_chunks)
pool=mp.Pool(self.nprocs)
outputs=list(tqdm(pool.imap(self.labelize_chunk,self.df_chunks),total=nchunks))
pool.close()
pool.join()
        self.df_pars=pd.concat(outputs)
# %% [markdown]
# # Bagging
#
# In this notebook, we will present the first ensemble using bootstrap samples
# called bagging.
#
# Bagging stands for Bootstrap AGGregatING. It uses bootstrap (random sampling
# with replacement) to learn several models. At predict time, the predictions
# of each learner are aggregated to give the final predictions.
#
# First, we will generate a simple synthetic dataset to get insights regarding
# bootstraping.
# %%
import pandas as pd
import numpy as np
# create a random number generator that will be used to set the randomness
rng = np.random.RandomState(0)
def generate_data(n_samples=50):
"""Generate synthetic dataset. Returns `data_train`, `data_test`,
`target_train`."""
x_max, x_min = 1.4, -1.4
len_x = x_max - x_min
x = rng.rand(n_samples) * len_x - len_x / 2
noise = rng.randn(n_samples) * 0.3
y = x ** 3 - 0.5 * x ** 2 + noise
data_train = pd.DataFrame(x, columns=["Feature"])
data_test = pd.DataFrame(
np.linspace(x_max, x_min, num=300), columns=["Feature"])
target_train = pd.Series(y, name="Target")
return data_train, data_test, target_train
# %%
import matplotlib.pyplot as plt
import seaborn as sns
data_train, data_test, target_train = generate_data(n_samples=50)
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
_ = plt.title("Synthetic regression dataset")
# %% [markdown]
# The link between our feature and the target to predict is non-linear.
# However, a decision tree is capable of fitting such data.
# %%
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3, random_state=0)
tree.fit(data_train, target_train)
y_pred = tree.predict(data_test)
# %%
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
plt.plot(data_test, y_pred, label="Fitted tree")
plt.legend()
_ = plt.title("Predictions by a single decision tree")
# %% [markdown]
# Let's see how we can use bootstraping to learn several trees.
#
# ## Bootstrap sample
#
# A bootstrap sample corresponds to a resampling, with replacement, of the
# original dataset, a sample that is the same size as the original dataset.
# Thus, the bootstrap sample will contain some data points several times while
# some of the original data points will not be present.
#
# We will create a function that given `data` and `target` will return a
# bootstrap sample `data_bootstrap` and `target_bootstrap`.
# %%
def bootstrap_sample(data, target):
# indices corresponding to a sampling with replacement of the same sample
# size than the original data
bootstrap_indices = rng.choice(
np.arange(target.shape[0]), size=target.shape[0], replace=True,
)
data_bootstrap_sample = data.iloc[bootstrap_indices]
target_bootstrap_sample = target.iloc[bootstrap_indices]
return data_bootstrap_sample, target_bootstrap_sample
# %% [markdown]
# We will generate 3 bootstrap samples and qualitatively check the difference
# with the original dataset.
# %%
bootstraps_illustration = pd.DataFrame()
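# %% [markdown]
# (Hedged sketch: the original cell is truncated right above. A minimal version
# of the announced illustration, reusing the `bootstrap_sample` helper defined
# earlier, could look like the following; the number of samples is the stated 3.)

# %%
n_bootstraps = 3
for bootstrap_idx in range(n_bootstraps):
    data_bootstrap, target_bootstrap = bootstrap_sample(data_train, target_train)
    plt.figure()
    plt.scatter(data_bootstrap["Feature"], target_bootstrap,
                color="tab:blue", alpha=0.5)
    _ = plt.title(f"Bootstrap sample #{bootstrap_idx + 1}")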
# Up to now, focused on 1D and 2D data in Series/DataFrames (respectively)
# To go to 3D and 4D, PD provides Panel/Panel4D objects to natively handle
# In practice, often circumvent those objects and use hierarchal indexing
# aka multi-indexing
# This allows for multiple index levels within a single index,
# thus getting more dimensions packed into the Series/DataFrames objects
# As an introduction to the practice, explore creation/use of "MultiIndex"
import pandas as pd
import numpy as np
##############################
### A Multiply Indexed Series
# How to represent 2D data in a 1D Series
## ---------- The wrong way ----------
# Use tuples for keys to make life bad:
index = [('California', 2000), ('California', 2010),
('New York', 2000), ('New York', 2010),
('Texas', 2000), ('Texas', 2010)]
populations = [33871648, 37253956,
18976457, 19378102,
20851820, 25145561]
pop = pd.Series(populations, index=index)
pop
# The one nice part, can index or slice on this multiple index:
pop[('California', 2010):('Texas', 2000)]
# BUT, if you want all vals for 2010, need to do some ugly/inefficient work:
pop[[i for i in pop.index if i[1] == 2010]]
## ---------- The better way ----------
# Use Pandas Multindex object - essentially an extension of the tuple
# with pre-built operations to keep code clean as efficient as possible
index = pd.MultiIndex.from_tuples(index)
index
# from output, can observe multiple levels of indexing
# state names and years in this case, and multiple labels for each data point
# can re-index the series with this MultiIndex to see hierarchical rep. of data
pop = pop.reindex(index)
pop
# first 2 columns of series show multiple index values
# third column shows the data
# first column blank entries are repetitions of the prior (hence, hierarchical)
# now we can use familiar Pandas slicing notation:
pop[:, 2010]
## MultiIndex as extra dimension
# Could easily have stored the data in above example using DataFrame
# Can actually morph it by using unstack()
pop_df = pop.unstack()
pop_df
# stack() does the opposite:
pop_df.stack()
# with MultiIndexing, can go beyond this trivial example to store 3+ dimensions
# in both Series and DataFrame objects
# for example, may want demographic data for each state population
# with multiindex, its simple as just adding another column to the DF
pop_df = pd.DataFrame({'total': pop,
'under18': [9267089, 9284094,
4687374, 4318033,
5906301, 6879014]})
pop_df
# also, ufuncs still work with MultiIndexed data
# compute fraction of people under 18 by year:
f_u18 = pop_df['under18'] / pop_df['total']
f_u18.unstack() # unstack() for formatting. neat!
##############################
### Methods of MultiIndex Creation
# The most straightforward way to construct multiply indexed Series or DataFrames
# is to pass a list of 2 or more index arrays to the constructor
df = pd.DataFrame(np.random.rand(4, 2),
index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
columns=['data1', 'data2'])
# Can pass a dictionary with correctly config'd tuples as keys,
# pandas will auto-recognize and use MultiIndex by default
data = {('California', 2000): 33871648,
('California', 2010): 37253956,
('Texas', 2000): 20851820,
('Texas', 2010): 25145561,
('New York', 2000): 18976457,
('New York', 2010): 19378102}
pd.Series(data)
## Explicit MultiIndex Constructors
# sometimes you need additional flexibility only available via explicit methods
# As example before, can construct from a list of arrays (w/ index vals at each level)
pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], [1, 2, 1, 2]])
# Can construct it from a list of tuples (giving extended index vals of each point)
pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)])
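# (Hedged continuation; the notes are cut off above)
# Can also build it from a cartesian product of single indices with from_product
pd.MultiIndex.from_product([['a', 'b'], [1, 2]])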
#!/usr/bin/env python
from __future__ import absolute_import
import re
import pandas as pd
import util
import os
import xgmml
from six.moves import range
from six.moves import zip
class Node(object):
def __init__(self, id, label='', left=None, right=None, similarity=1.0):
self.id, self.label, self.left, self.right, self.similarity = id, label, left, right, similarity
if self.label=='': self.label=self.id
def has_child(self):
return not(self.left is None and self.right is None)
def all_children(self, l_keep_node=False, l_include_self=True):
# need to rewrite to not use recursion, otherwise, won't work for deep trees
children=[]
queue=[self]
base=self.id
while len(queue)>0:
x=queue.pop(0)
if x.has_child():
if l_keep_node:
children.append(x.id)
if x.left is not None:
queue.append(x.left)
if x.right is not None:
queue.append(x.right)
elif not l_keep_node:
children.append(x.id)
if not l_include_self:
children=[x for x in children if x!=base]
return children
#def all_children(self, l_keep_node=False, l_include_self=True):
# # need to rewrite to not use recursion, otherwise, won't work for deep trees
# if l_keep_node:
# return self.all_children_nodes(l_include_self=l_include_self)
# children=[]
# if self.left is not None:
# if self.left.has_child():
# children.extend(self.left.all_children())
# else:
# ch ildren.append(self.left.id)
# if self.right is not None:
# if self.right.has_child():
# children.extend(self.right.all_children())
# else:
# children.append(self.right.id)
# return children
def all_children_nodes(self, l_include_self=True):
"""Only keep children nodes, not leaves."""
return self.all_children(l_keep_node=True, l_include_self=l_include_self)
#children=[]
#if l_include_self and self.has_child(): children.append(self.id)
#if self.left is not None and self.left.has_child():
# children.extend(self.left.all_children_nodes(l_include_self=True))
#if self.right is not None and self.right.has_child():
# children.extend(self.right.all_children_nodes(l_include_self=True))
#return children
# return the ID of the most representative node, and the # of nodes it represents
#def representative(self):
# if not self.has_child():
# return (self.id, 1)
# else:
# (ln, i_l)=self.left.representative()
# (rn, i_r)=self.right.representative()
# if (i_l<i_r):
# return (rn, i_l+i_r)
# else:
# return (ln, i_l+i_r)
def all_nof_leaves(self):
"""compute the total number of leaves under each node"""
c_cnt={}
q=[self]
seen=[]
while len(q):
k=q.pop(0)
seen.append(k)
if k.left is not None:
q.append(k.left)
if k.right is not None:
q.append(k.right)
seen.reverse()
for x in seen:
if not x.has_child():
c_cnt[x.id]=1
else:
c_cnt[x.id]=0
if x.left is not None:
c_cnt[x.id]+=c_cnt[x.left.id]
if x.right is not None:
c_cnt[x.id]+=c_cnt[x.right.id]
return c_cnt
def representative(self):
"""return the ID of the most representative node, and the # of nodes it represents
Rewrite to avoid recursion"""
# first compute the total number of leaves under each node
c_cnt=self.all_nof_leaves()
# now pick the representative gene from the larger tree branch
k=self
while True:
if not k.has_child():
return (k.id, c_cnt[self.id])
else:
if c_cnt[k.left.id]<c_cnt[k.right.id]:
k=k.right
else:
k=k.left
#def node_similarities(self):
# """Return list of (node, similarity) and sort them from small to large"""
# if self.has_child():
# out=self.left.node_similarities()+self.right.node_similarities()
# out.append((self, self.similarity))
# return sorted(out, key=lambda(x): x[1])
# else:
# return []
def node_similarities(self):
"""Return list of (node, similarity) and sort them from small to large
Rewrite to avoid recursion."""
q=[self]
out=[]
while len(q):
k=q.pop(0)
if k.left is not None:
q.append(k.left)
if k.right is not None:
q.append(k.right)
if k.has_child():
out.append((k, k.similarity))
return sorted(out, key=lambda x: x[1])
## n_picks most representative nodes
#def representatives(self, n_picks=1):
# if n_picks==1 or not self.has_child():
# return [self.representative()]
# else:
# out=[]
# # take the n_picks most representative subtrees
# L_nodes=self.node_similarities()[:n_picks-1]
# c_nodes={ n.id:True for n,s in L_nodes }
# for node,s in L_nodes:
# #print node.id, "<<<", node.left.id, ">>>", node.right.id
# if node.left.id not in c_nodes:
# out.append(node.left.representative())
# if node.right.id not in c_nodes:
# out.append(node.right.representative())
# return sorted(out, key=lambda(x): -x[1])
# n_picks most representative nodes, each node must represent at least min_size nodes
def representatives(self, n_picks=1, min_size=1, l_keep_members=False):
def cut_grps(L_nodes, i_cut):
L=L_nodes[:i_cut]
c_nodes={ n.id:True for n,s in L }
out=[]
for node,s in L:
#print node.id, "<<<", node.left.id, ">>>", node.right.id
if node.left.id not in c_nodes and c_cnt[node.left.id]>=min_size:
X=node.left.representative()
if l_keep_members: X=(X[0], X[1], node.left.all_children())
out.append(X)
if node.right.id not in c_nodes and c_cnt[node.right.id]>=min_size:
X=node.right.representative()
if l_keep_members: X=(X[0], X[1], node.right.all_children())
out.append(X)
return (len(out), out, )
c_cnt=self.all_nof_leaves()
if n_picks==1 or not self.has_child():
if c_cnt[self.id]>=min_size:
X=self.representative()
if l_keep_members: X=(X[0], X[1], self.all_children())
return [X]
else:
return []
else:
# take the n_picks most representative subtrees
L_nodes=self.node_similarities()
if min_size>1:
L_nodes=[(n,s) for n,s in L_nodes if c_cnt[n.id]>=min_size]
out=[]
# have not found a clever way, so just try different cutoffs, until we get n_picks groups
# if min_size==1, it should get it right the first time
(best_i, best_n, best_out)=(1, 1, [])
for i in range(n_picks-1, len(L_nodes)+1):
n, out=cut_grps(L_nodes, i)
#print ">>", i, n, n_picks
if n>=n_picks:
return sorted(out, key=lambda x: -x[1])
if abs(n-n_picks)<abs(best_n-n_picks) or i==n_picks-1:
(best_i, best_n, best_out)=(i, n, out)
#print ">>>", best_i, best_n
return sorted(best_out, key=lambda x: -x[1])
#def cut_(self, min_similarity=0.8, l_keep_node=False):
# """Replaced by cut() below. If l_keep_node, only output the nodes, instead of genes."""
# if self.similarity>=min_similarity:
# if self.has_child():
# return [self.all_children(l_keep_node=l_keep_node)]
# else:
# return [] if l_keep_node else [[self.id]]
# else:
# out=[]
# if self.left is not None:
# out_left=self.left.cut(min_similarity=min_similarity, l_keep_node=l_keep_node)
# if out_left: out.extend(out_left)
# if self.right is not None:
# out_right=self.right.cut(min_similarity=min_similarity, l_keep_node=l_keep_node)
# if out_right: out.extend(out_right)
# return out
def cut(self, min_similarity=0.8, l_keep_node=False):
"""If l_keep_node, only output the nodes, instead of genes. Rewrite it to avoid recursion, so it works for flat trees."""
q=[self]
out=[]
while len(q):
k=q.pop(0)
if k.similarity>=min_similarity:
if k.has_child():
out.append(k.all_children(l_keep_node=l_keep_node))
elif not l_keep_node:
out.append([k.id])
else:
if k.left is not None:
q.append(k.left)
if k.right is not None:
q.append(k.right)
return out
#def bicut(self, high_similarity=0.8, low_similarity=0.6):
# if self.similarity<low_similarity:
# out=[]
# #print self.id, "too low"
# if self.left is not None:
# #if self.left.has_child():
# out_left=self.left.bicut(high_similarity=high_similarity, low_similarity=low_similarity)
# if out_left: out.extend(out_left)
# if self.right is not None:
# #if self.right.has_child():
# out_right=self.right.bicut(high_similarity=high_similarity, low_similarity=low_similarity)
# if out_right: out.extend(out_right)
# elif self.similarity<high_similarity:
# #print self.id, "middle"
# out=[[]]
# if self.left is not None:
# #if self.left.has_child():
# out_left=self.left.bicut(high_similarity=high_similarity, low_similarity=low_similarity)
# out[0].extend(out_left[0])
# #else:
# # out[0].extend([self.left.id])
# if self.right is not None:
# #if self.right.has_child():
# out_right=self.right.bicut(high_similarity=high_similarity, low_similarity=low_similarity)
# out[0].extend(out_right[0])
# #else:
# # out[0].extend([self.right.id])
# if not self.has_child(): out.append([self.id])
# else: # >=high_similarity
# #print self.id, "high"
# if not self.has_child():
# out=[[[self.id]]]
# else:
# out=[[self.all_children()]]
# #print "return >>>", self.id, out
# return out
def bicut(self, high_similarity=0.8, low_similarity=0.6):
"""Rewrite into non-recursive version"""
q=[self]
out=[]
while(len(q)):
k=q.pop(0)
if k.similarity<low_similarity:
if k.left is not None:
q.append(k.left)
if k.right is not None:
q.append(k.right)
elif k.similarity<high_similarity:
out2=[]
if k.left is not None:
out2.extend(k.left.cut(min_similarity=high_similarity, l_keep_node=False))
if k.right is not None:
out2.extend(k.right.cut(min_similarity=high_similarity, l_keep_node=False))
if not k.has_child():
out2.extend([k.id])
out.append(out2)
else: # >=high_similarity
if not k.has_child():
out.append([[k.id]])
else:
#print k.all_children()
out.append([k.all_children()])
return out
def __str__(self, level=0):
s=''
if self.has_child():
if self.left is not None: s+=self.left.__str__(level+1)
if self.right is not None: s+=self.right.__str__(level+1)
else:
s=' '*level+self.id+':'+self.label+'\n'
return s
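# --- Hedged illustration (not part of the original module) -------------------
# A three-leaf toy tree showing how Node.cut groups leaves by the similarity
# threshold; the IDs and similarity values below are made up for the example.
def _demo_node_cut():
    a, b, c = Node('GENE1'), Node('GENE2'), Node('GENE3')
    inner = Node('NODE1', left=a, right=b, similarity=0.9)
    root = Node('NODE2', left=inner, right=c, similarity=0.5)
    # With min_similarity=0.8 the tight 0.9 pair stays together and GENE3 is
    # returned alone: [['GENE1', 'GENE2'], ['GENE3']]
    return root.cut(min_similarity=0.8)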
class Tree(object):
def __init__(self, s_file='', Z=None, l_gene_tree=True):
"""Z: linkage matrix, if None, assume s_file is not empty"""
self.l_gene_tree=l_gene_tree
self.root=Node('ROOT')
self.l_gene_tree=l_gene_tree # gene tree or array tree
self.c_name={}
self.c_node={}
self.size=0
self.parent={} # track the parent node for each node
self.tree_file=None
if Z is not None:
self.l_gene_tree=True
r,c=Z.shape
n=r+1
r_dist=max(Z[:, 2].max(), 1.0)
for i in range(r):
id_l=str(int(Z[i, 0]))
id_r=str(int(Z[i, 1]))
id_n=str(n+i)
r=max(1.0-Z[i, 2]/r_dist, 0.0)
self.new_node(id_n, label=self.c_name.get(id_n, ''), left=self.new_node(id_l), right=self.new_node(id_r), similarity=r)
self.parent[id_l]=id_n
self.parent[id_r]=id_n
self.root=self.get_node(id_n)
self.size=n-1
else:
self.l_gene_tree=l_gene_tree
if re.search(r'\.[ag]tr$', s_file):
if re.search(r'\.atr$', s_file):
l_gene_tree=False
s_file=re.sub(r'\.[ag]tr$', '', s_file)
self.root=Node('ROOT')
self.l_gene_tree=l_gene_tree # gene tree or array tree
self.c_name={}
self.c_node={}
self.size=0
self.parent={} # track the parent node for each node
if not os.path.exists(s_file+".cdt"):
util.error_msg("File not exist: "+s_file+".cdt!")
f=open(s_file+'.cdt')
S_header=f.readline().strip().split("\t")
if not l_gene_tree:
while True:
line=f.readline()
if not line: break
if line.startswith("AID\t"):
S_AID=line.strip().split("\t")
self.c_name={s:x for s,x in zip(S_AID, S_header) if str(s).startswith('ARRY')}
break
else:
s_col='GENE'
if s_col not in S_header and 'NAME' in S_header:
s_col='NAME'
i_GID=util.index('GID', S_header)
i_NAME=util.index(s_col, S_header)
while True:
line=f.readline()
if not line: break
if line.startswith('AID') or line.startswith('EWEIGHT'):
continue
S=line.strip().split("\t")
self.c_name[S[i_GID]]=S[i_NAME]
f.close()
self.size=len(self.c_name)
            if self.size==0: util.error_msg("Tree:__init__: No node is found to build the tree!")
s_filename=s_file+('.gtr' if l_gene_tree else '.atr')
# check if file has column header
self.tree_file=s_filename
df=Tree.read_tree_file(s_filename)
self.parse(df)
def get_node(self, id):
return self.c_node.get(id, None)
#def nof_nodes(self):
# # this includes both NODE* and GENE*
# return len(self.c_node)
def nof_leaves(self):
# c_node contains n leaves and n-1 nodes
return (len(self.c_node)+1)/2
def get_node_by_name(self, s_name):
for k,v in self.c_name.items():
if v==s_name:
return self.get_node(k)
return None
def get_parents(self, id):
parents=[]
while True:
p=self.parent.get(id, None)
if p is None: break
parents.append(p)
id=p
return parents
# find the nearest node that contains all ids in id_list as child
# this is useful to reverse find the subtree using subtree members
# e.g., find the tree contains a group made by cut
# warning, if id_list has only one gene, it will return itself
def get_umbrella_node(self, id_list):
if len(id_list)==0:
return None
if len(id_list)==1:
return self.get_node(id_list[0])
reference_gene=None # the node that descends fastest in tree
reference_depth=0
paths={}
for id in id_list:
p=self.get_parents(id)
paths[id]=p
if reference_gene is None or len(p)<reference_depth:
reference_gene=id
reference_depth=len(p)
#print paths
#print reference_gene, reference_depth, paths[reference_gene]
for p in paths[reference_gene]:
#print ">>>>>>>> "+p
l_all=True
for k,v in paths.items():
if p not in v:
l_all=False
break
if l_all: return self.get_node(p)
return None
def new_node(self, id, label='', left=None, right=None, similarity=1.0):
if self.get_node(id) is None:
self.c_node[id]=Node(id, label, left=left, right=right, similarity=similarity)
return self.get_node(id)
def parse(self, df):
n=len(df)
for i in range(n):
            id_l = df.loc[i, 'left']
            id_r = df.loc[i, 'right']
            id_n = df.loc[i, 'node']
            r = float(df.loc[i, 'similarity'])
#print id_n, id_l, id_r, r
self.new_node(id_n, label=self.c_name.get(id_n, ''), left=self.new_node(id_l), right=self.new_node(id_r), similarity=r)
self.parent[id_l]=id_n
self.parent[id_r]=id_n
self.root=self.get_node(id_n)
self.size=n
def representatives(self, n_picks=1, min_size=1, l_keep_members=False):
return self.root.representatives(n_picks=n_picks, min_size=min_size, l_keep_members=l_keep_members)
def cut(self, min_similarity=0.8, l_keep_node=False):
return self.root.cut(min_similarity=min_similarity, l_keep_node=l_keep_node)
def bicut(self, high_similarity=0.8, low_similarity=0.6):
return self.root.bicut(high_similarity=high_similarity, low_similarity=low_similarity)
@staticmethod
def read_tree_file(s_filename):
f=open(s_filename, "r")
s=f.readline()
l_has_header = s.startswith('NODEID')
f.close()
if l_has_header:
            df = pd.read_csv(s_filename, sep="\t")
        else:
            df = pd.read_csv(s_filename, sep="\t", header=None)
        S = ['node', 'left', 'right', 'similarity']
        if len(df.columns) == 5:  # optional color column present
            S.append('color')
        df.columns = S
return df
@staticmethod
def color_map(nodes, cm=None):
"""create a dictionary of node-hex color mapping
nodes:
(1) dict of {node_id: color}, nodes not found in the dict will be colored as black
value color:
(a) matplotlib.colors, tuple of floats, or hex string #FF0000
(b) int: index into cm (a list of colors)
(c) float or int, when cm is not a list, values will be normalized into
[0,1] and colormap cm is used to translate the value into a color
(2) list of list, [['node1','node5'],['node2']], each sublist is assigned one color
In this case cm should be a list of colors of the same length ['red','blue']
cm: matplotlib.mcolors.LinearSegmentedColormap or a list of colors
if None, we use rainbow colormap
Examples:
Tree.color_map({'node1':'#ff0000', 'node3':'#0000ff'})
Tree.color_map({'node1':0, 'node3':1}, ['#ff0000','#0000ff'])
Tree.color_map({'node1':0, 'node3',1}) # cm is set to matplotlib.cm.gist_rainbow
Tree.color_map([['node1'], ['node2','node3']], ['#ff0000','#0000ff'])
return dict of {node: hex_color}
"""
import matplotlib
import matplotlib.cm
import matplotlib.colors as mcolors
if cm is None:
cm=matplotlib.cm.gist_rainbow
if type(nodes) is dict:
R=[]
for k,v in nodes.items():
if type(v) in (int, float):
R.append(v)
if len(R):
r_min,r_max=min(R), max(R)
for k,v in nodes.items():
if type(v) is int:
if type(cm) is list:
nodes[k]=list[v % len(cm)]
else: # cm must be a colormap
nodes[k]=cm((v-r_min)/(r_max-r_min))
elif type(v) is float:
nodes[k]=cm((v-r_min)/(r_max-r_min))
else: # nodes must be a list
c={}
n=len(nodes)
for i,X in enumerate(nodes):
if type(cm) is list:
clr=cm[i%n]
else:
clr=cm(i*1.0/(n-1)) if n>1 else cm(1.0)
for x in X:
c[x]=clr
nodes=c
for k,v in nodes.items():
if type(v) is tuple:
v=[ min(max(int(x*255),0), 255) for x in v]
v='#%02x%02x%02x' % tuple(v[:3])
nodes[k]=v
return nodes
def color(self, nodes, l_name_to_id=True, cm=None):
"""Color tree nodes
nodes: dict or list of nodes lists
nodes and colormap cm are combined to passed to Tree.color_map (see document)
l_name_to_id: bool, default True, use leave name or node_id
Warning: this method color nodes, so if leave name is provided and two leaves
under the same node has different colors, only one color is used.
"""
df=Tree.read_tree_file(self.tree_file)
# ['node','left','right','similarity', 'color']
df['color']='#000000'
c_id2node={}
c_name2id={}
if l_name_to_id:
for k,v in self.c_name.items():
c_name2id[v]=k
for i in df.index:
            if not df.loc[i, 'left'].startswith('NODE'):
                c_id2node[df.loc[i, 'left']] = df.loc[i, 'node']
            if not df.loc[i, 'right'].startswith('NODE'):
                c_id2node[df.loc[i, 'right']] = df.loc[i, 'node']
        t = df.loc[:, ['node', 'color']]
t.set_index('node', inplace=True)
c=Tree.color_map(nodes, cm)
for k,v in c.items():
if type(v) is tuple:
v=[ min(max(int(x*255),0), 255) for x in v]
v='#%02x%02x%02x' % tuple(v[:3])
if l_name_to_id:
k=c_name2id.get(k,k)
k=c_id2node.get(k,k)
if k in t.index:
                t.loc[k, 'color'] = v
else:
util.warn_msg('Node not in tree: '+k)
df['color']=list(t.color)
df.columns=['NODEID','LEFT','RIGHT','CORRELATION','NODECOLOR']
util.df2sdf(df, s_format="%.4f").to_csv(self.tree_file, index=False, sep="\t")
def color_cut(self, min_similarity=0.8, S_COLOR=None):
#colorbrewer2.org, qualitative, 5-class set
#S_COLOR=['#E41A1C', '#277EB8', '#4DAF4A', '#984EA3', '#FF7F00']
c_cut=self.cut(min_similarity=min_similarity, l_keep_node=True)
self.color(c_cut, False, S_COLOR)
# tries to pick up to n_picks most representative genes within each group
def cut2table(self, c_cut, n_picks=1):
rows=[]
for i,grp in enumerate(c_cut):
tr=self.get_umbrella_node(grp)
L_rep=tr.representatives(n_picks)
c_rep={ n:c for n,c in L_rep }
sz=len(grp)
for g in grp:
rows.append({'GroupID':i+1, 'GroupSize':sz, 'Entry':self.c_name.get(g, g), 'RepresentCounts':c_rep.get(g, 0)})
df=pd.DataFrame(rows)
df=df.sort_values(['GroupSize', 'GroupID'], ascending=[False, True])
return df
def bicut2table(self, c_bicut, n_opt_picks=1, n_ok_picks=0):
rows=[]
for i,ok_grp in enumerate(c_bicut):
ok_g=[]
for opt_g in ok_grp:
ok_g.extend(opt_g)
ok_sz=len(ok_g)
c_ok_rep={}
if n_ok_picks:
tr=self.get_umbrella_node(ok_g)
L_rep=tr.representatives(n_ok_picks)
c_ok_rep={ n:c for n,c in L_rep }
for j,opt_g in enumerate(ok_grp):
opt_sz=len(opt_g)
c_opt_rep={}
if n_opt_picks:
tr=self.get_umbrella_node(opt_g)
L_rep=tr.representatives(n_opt_picks)
c_opt_rep={ n:c for n,c in L_rep }
for g in opt_g:
one={'OkayGroupID':i+1, 'OkayGroupSize':ok_sz, 'OptimalGroupID':j+1, 'OptimalGroupSize':opt_sz, 'Entry':self.c_name.get(g, g)}
if n_ok_picks:
one['OkayRepresentCounts']=c_ok_rep.get(g,0)
if n_opt_picks:
one['OptimalRepresentCounts']=c_opt_rep.get(g,0)
rows.append(one)
df=pd.DataFrame(rows)
df=df.sort_values(['OkayGroupSize', 'OptimalGroupSize', 'OkayGroupID', 'OptimalGroupID'], ascending=[False, False, True, True])
return df
def __str__(self):
return str(self.root)
def to_network_(self, l_digraph=False):
out=[]
for x in self.c_node.values():
is_node=1 if x.has_child() else 0
if x==self.root:
is_node=2
out.append({'Gene':x.id, 'Symbol': self.c_name.get(x.id, x.id), 'IsNode': is_node})
        t_node = pd.DataFrame(out)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Plots
import matplotlib.pyplot as plt
import seaborn as sns
# Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
import gc
# LightGBM framework
import lightgbm as lgb
"""
From github:
A fast, distributed, high performance gradient boosting (GBDT, GBRT, GBM or MART)
framework based on decision tree algorithms,
used for ranking, classification and many other machine learning tasks.
LightGBM is a gradient boosting framework that uses tree based learning algorithms.
It is designed to be distributed and efficient with the following advantages:
Faster training speed and higher efficiency
Lower memory usage
Better accuracy
Parallel and GPU learning supported
Capable of handling large-scale data
"""
########
# Load the data
########
# main train and test data
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
#load supplemental taxi route data
train_fastest_1 = pd.read_csv('fastest_routes_train_part_1.csv')
train_fastest_2 = pd.read_csv('fastest_routes_train_part_2.csv')
from typing import Any
from typing import Dict
from typing import Optional
import pandas
import pytest
from evidently.model_profile.sections.classification_performance_profile_section import \
ClassificationPerformanceProfileSection
from .helpers import calculate_section_results
from .helpers import check_profile_section_result_common_part
from .helpers import check_section_without_calculation_results
def check_classification_performance_metrics_dict(metrics: Dict[str, Any]) -> None:
assert 'accuracy' in metrics
assert 'f1' in metrics
assert 'metrics_matrix' in metrics
assert 'precision' in metrics
assert 'recall' in metrics
    assert 'confusion_matrix' in metrics
metrics_matrix = metrics['metrics_matrix']
assert isinstance(metrics_matrix, dict)
assert 'accuracy' in metrics_matrix
assert 'macro avg' in metrics_matrix
assert 'weighted avg' in metrics_matrix
confusion_matrix = metrics['confusion_matrix']
assert 'labels' in confusion_matrix
assert isinstance(confusion_matrix['labels'], list)
assert 'values' in confusion_matrix
assert isinstance(confusion_matrix['values'], list)
def test_no_calculation_results() -> None:
check_section_without_calculation_results(ClassificationPerformanceProfileSection, 'classification_performance')
@pytest.mark.parametrize(
'reference_data,current_data', (
(pandas.DataFrame({'target': [1, 1, 3, 3], 'prediction': [1, 2, 1, 4]}), None),
(
pandas.DataFrame({'target': [1, 2, 3, 4], 'prediction': [1, 2, 1, 4]}),
pandas.DataFrame({'target': [1, 1, 3, 3], 'prediction': [1, 2, 1, 4]}),
),
)
)
def test_profile_section_with_calculated_results(reference_data, current_data) -> None:
section_result = calculate_section_results(ClassificationPerformanceProfileSection, reference_data, current_data)
check_profile_section_result_common_part(section_result, 'classification_performance')
result_data = section_result['data']
# check metrics structure and types, ignore concrete metrics values
assert 'metrics' in result_data
metrics = result_data['metrics']
assert 'reference' in metrics
check_classification_performance_metrics_dict(metrics['reference'])
if current_data is not None:
assert 'current' in metrics
check_classification_performance_metrics_dict(metrics['current'])
@pytest.mark.parametrize(
'reference_data, current_data',
(
(
pandas.DataFrame({'target': [1, 2, 3, 4]}),
pandas.DataFrame({'target': [1, 1, 3, 3]}),
),
(
            pandas.DataFrame({'prediction': [1, 2, 3, 4]}),
import pandas as pd
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
import numpy as np
import os
from transformers import pipeline
import dataframe_image as dfi
import seaborn as sns
import emoji
from nltk.corpus import stopwords
import re
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
def get_public_metrics(dataset:pd.DataFrame)->pd.DataFrame:
'''Extrae la columna "public_metrics" que se encuentra en el dataset'''
df_public = pd.DataFrame()
for i, row in dataset.iterrows():
public_metrics = eval(row['public_metrics'])
public_metrics['id'] = row['id']
        df_i = pd.DataFrame([public_metrics])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 21:52:12 2019
@author: dipesh
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://github.com/duaraanalytics/bankmarketing/blob/master/Analyzing%20Employee%20Churn%20with%20Keras.ipynb
"""
Created on Wed Mar 6 08:51:19 2019
@author: dipesh
"""
# Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
#from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
#from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
from keras.layers import Dense, Dropout
from keras.models import Sequential
#from keras.callbacks import EarlyStopping, ModelCheckpoint, History
from keras.wrappers.scikit_learn import KerasClassifier
# Load dataset
df = pd.read_csv('bank-additional-full.csv')
# View a sample of the loaded data
df.sample(5)
df.info()
pd.set_option('display.max_columns', 200)
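# Hedged sketch (the script is cut off here): the usual next steps the imports
# above point to -- label-encode categoricals, scale, split -- shown generically.
# The default target column name is a placeholder, not confirmed by this script.
def prepare_features(frame, target_col='y'):
    frame = frame.copy()
    for col in frame.select_dtypes(include='object').columns:
        frame[col] = LabelEncoder().fit_transform(frame[col])
    X = StandardScaler().fit_transform(frame.drop(columns=[target_col]).values)
    return train_test_split(X, frame[target_col].values,
                            test_size=0.2, random_state=42)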
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
            Timestamp("20130101 09:00:00"),
#!/usr/bin/env python
# coding: utf-8
# ## Library imports
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
COLORS = sns.color_palette()
import chart_studio.plotly as py
import cufflinks as cf
print(cf.__version__)
cf.go_offline()
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
import plotly
plotly.offline.init_notebook_mode()
# ## Korean font support for plot rendering
# In[2]:
import matplotlib
from matplotlib import font_manager, rc
import platform
try :
if platform.system() == 'Windows':
        # Windows
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
else:
        # macOS
rc('font', family='AppleGothic')
except :
pass
matplotlib.rcParams['axes.unicode_minus'] = False
# # <Dataset overview>
# - Seoul public bicycle usage data (1 October 2019 - 30 November 2019)
# ### EDA by age group
# - How should the data be viewed by age group?
# - Extract usage time, travel distance, and rental counts
# - Compare and analyse travel distance and rental counts against usage time
# - Should the exercise (calorie) column be dropped?
#
# #### Data loading and concat
# - Korean text in public-data CSV files comes out garbled
# - Ideally every public-data file would be encoded as utf-8, but most are encoded as cp949 or euc-kr
# - These Seoul public bicycle CSV files are encoded as cp949, so loading them as utf-8 turns the text into ???? characters
# - Converted the files to utf-8 and reloaded them (a sketch of this conversion step follows below)
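# In[ ]:
# Sketch of the cp949 -> utf-8 conversion mentioned above (hypothetical helper;
# `src_path`/`dst_path` stand in for the original file paths, which are not
# repeated here):
def convert_cp949_to_utf8(src_path, dst_path):
    with open(src_path, encoding='cp949') as src, open(dst_path, 'w', encoding='utf-8') as dst:
        dst.write(src.read())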
# In[4]:
df1 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(7).csv', encoding='utf-8')
df1 = df1.loc[458674:]
df2 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(8).csv', encoding='utf-8')
df3 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(9).csv', encoding='utf-8')
df4 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(10).csv', encoding='utf-8')
pb_df = pd.concat([df1, df2, df3, df4]).reset_index(drop=True)
pb_df.isnull().sum()
pb_df.dropna(inplace=True)
pb_df
# #### Using regular expressions
# - The age-group code column is written as ~10s, ~70s, and so on
# - When grouping by the age-group code, the groups do not sort properly
# - Use a regular expression to strip the '~' character
# In[5]:
pb_df['연령대코드'] = pb_df['연령대코드'].str.replace(pat=r'[~@]', repl = r' ', regex=True)
pb_df
# #### Convert the gender column to uppercase
# In[8]:
pb_df['성별'] = pb_df['성별'].str.upper()
pb_df
# #### Removing rows with a value of 0
# - These are zeros rather than nulls, so drop them explicitly
# In[9]:
pb_df = pb_df[pb_df.이동거리 != 0]
pb_df
# In[10]:
age_by_df = pb_df[['대여일자','대여구분코드','성별','연령대코드','이용건수','운동량','이동거리','사용시간']]
age_by_df = age_by_df.reset_index(drop=True)
age_by_df['건당 이동거리'] = age_by_df['이동거리']/age_by_df['이용건수']
age_by_df
# #### Gender
# - Explore average usage time by gender
# - Explore average travel distance by gender
# In[11]:
resultbysex_0 = round(age_by_df.groupby('성별').mean()['사용시간'].reset_index(name='성별 사용시간'),2)
resultbysex_0
# In[12]:
resultbysex_1 = round(age_by_df.groupby('성별').mean()['이동거리'].reset_index(name='평균이동거리'),2)
resultbysex_1
# In[13]:
resultbysex_2 = round(age_by_df.groupby('성별').mean()['건당 이동거리'].reset_index(name='1건당 이동거리'),2)
resultbysex_2
# In[14]:
result_sex = pd.concat([resultbysex_1,resultbysex_2],axis=1)
result_sex.columns = ['성별', '평균이동거리', '123424', '1건당 이동거리']
result_sex = result_sex.drop('123424', axis=1)
result_sex = result_sex.set_index('성별')
# In[17]:
result_sex[['평균이동거리','1건당 이동거리']].iplot(kind='barh',title='Minute Average', xTitle='SEX', yTitle='VALUES')
# #### Check travel distance and usage time by age group and rental type code
# - Convert the exercise and carbon columns from string to float
# - Cast the relevant columns with apply(pd.to_numeric)
# - Format the scientific notation (e) for readability
# In[18]:
age_by_df['운동량(float)'] = age_by_df['운동량'].apply(pd.to_numeric, errors = 'coerce')
age_by_df
# In[19]:
pd.options.display.float_format = '{:.2f}'.format
resultbyage_0 = round(age_by_df.groupby('연령대코드').mean()['사용시간'].reset_index(name='연령별 사용시간'),2)
resultbyage_0
# In[20]:
resultbyage_1 = round(age_by_df.groupby('연령대코드').mean()['이동거리'].reset_index(name='평균이동거리'),2)
resultbyage_1['평균이동거리(km)'] = resultbyage_1['평균이동거리'] / 1000
resultbyage_1 = resultbyage_1.drop('평균이동거리', axis=1)
resultbyage_1
# In[22]:
age_datas = pd.concat([resultbyage_0, resultbyage_1], axis=1)
age_datas.columns = ['연령대코드', '연령별 사용시간', '드랍', '평균이동거리(km)']
age_datas.drop('드랍',axis=1, inplace=True)
age_datas
# In[23]:
x = age_datas['연령대코드']
y1 = age_datas['연령별 사용시간']
y2 = age_datas['평균이동거리(km)']
fig, ax1 = plt.subplots(figsize=(16,8))
ax2 = ax1.twinx()
data_y1 = ax1.plot(x, y1, color='b', marker='o', label='평균사용시간')
data_y2 = ax2.plot(x, y2, color='r', marker='s', label='평균이동거리')
ax1.set_xlabel('AGE')
ax1.set_ylabel('평균사용시간')
ax2.set_ylabel('평균이동거리')
ax1.legend()
plt.show()
# #### Preprocess per-minute travel distance and exercise data by age group
# In[24]:
age_by_df2 = age_by_df.groupby('연령대코드').sum()[['이동거리','사용시간','운동량(float)']]
y = age_by_df2['이동거리'] / age_by_df2['사용시간']
pd.DataFrame(y)
age_by_df2 = pd.concat([age_by_df2,y], axis=1)
age_by_df2
age_by_df2.columns = ['이동거리','사용시간','운동량(float)','분당이동거리']
age_by_df2['분당운동량'] = age_by_df2['운동량(float)'] / age_by_df2['사용시간']
age_by_df2 = age_by_df2.reset_index()
age_by_df2
# #### Data normalisation
# - Deep-copy the data before normalising it
# In[26]:
age_by_df3 = age_by_df2.copy()
age_by_df3
# In[28]:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(age_by_df3[['이동거리','사용시간','운동량(float)']])
X = sc.transform(age_by_df3[['이동거리','사용시간','운동량(float)']])
X
# In[55]:
from sklearn import preprocessing
preprocessing.minmax_scale(age_by_df3['이동거리'])
# Min-max scale the per-minute columns; `output` is used in the next cell.
min_max_scaler = preprocessing.MinMaxScaler()
x = age_by_df3[['분당이동거리','분당운동량']].values
x_scaled = min_max_scaler.fit(x)
print(x_scaled.data_max_)
output = min_max_scaler.transform(x)
output = pd.DataFrame(output, columns=age_by_df3[['분당이동거리','분당운동량']].columns)
output
# In[60]:
age_by_df3 = pd.concat([age_by_df3,output], axis=1)
age_by_df3.columns = ['연령대코드','이동거리','사용시간','운동량(float)','분당이동거리','분당운동량','분당이동거리(norm)','분당운동량(norm)']
age_by_df3 = age_by_df3.set_index('연령대코드')
age_by_df3
# #### Per-minute travel distance and exercise by age group
# - Use iplot to make the most of interactive visualisation
# - Judging by calorie burn alone, teens and twenty-somethings show less exercise than travel distance, which may mean they rode slowly or took many short rides
# In[61]:
age_by_df3[['분당이동거리(norm)','분당운동량(norm)']].iplot(kind='bar',title='Data per minute', xTitle='AGE', yTitle='NORMALIZING VALUES')
# In[64]:
age_by_df
xy = age_by_df.groupby(['대여일자','연령대코드']).mean()[['운동량(float)','이동거리','사용시간']]
xy = xy.reset_index()
xy
# In[66]:
sns.set(style="white")
sns.relplot(x="이동거리", y="운동량(float)", hue="연령대코드", size='사용시간',
sizes=(40, 400), alpha=.5, palette="muted",
height=7, data=xy)
# ### EDA by date
# - What does the data look like at a weekly level?
# - Preprocess weekly usage time, travel distance, and related fields
# In[67]:
age_by_df['대여일자'] = pd.to_datetime(age_by_df['대여일자'], infer_datetime_format=True)
age_by_df = age_by_df.set_index('대여일자')
age_by_df
# #### Weekly travel distance (km)
# - Converted to km before calculating
# - Travel distance shrinks as the weather gets colder
# - Calorie burn also drops sharply as the weather cools and trips get shorter
# - The first week of October looks small because its Monday falls in September and is excluded
# In[68]:
time_by_df = age_by_df.resample('W').sum()['이동거리']
time_by_df.astype(float) / 1000
# In[69]:
weekly_momentum = age_by_df.resample('W').sum()['운동량(float)']
weekly_momentum.astype(float)
# In[70]:
date_by_df = age_by_df.copy()
date_by_df['운동량'] = date_by_df['운동량'].astype(float)
date_by_df
# In[72]:
date1 = date_by_df.groupby('대여일자').sum()[['이동거리','사용시간','운동량']]
date1['이동거리(km)'] = date1['이동거리'] / 1000
date1['사용시간(hour)'] = round(date1['사용시간'] / 60,2)
date1.drop(['이동거리','사용시간'], axis=1, inplace=True)
date1
# #### Data normalisation
# In[73]:
from sklearn import preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
x = date1.copy()
x[:] = min_max_scaler.fit_transform(x[:])
x
# ##### Heavy nationwide rain fell on 2 and 7 October and on 13 and 17 November
# ##### The monthly average minimum temperature fell sharply from 5°C in October to -5°C in November, which is why bicycle usage dropped so noticeably
# ##### Rain between 25 and 27 October coincides with a sharp drop in temperature
# In[79]:
x1 = x.reset_index()
plt.figure(figsize=(18,12))
plt.plot(x1['대여일자'], x1['운동량'], label="운동량")
plt.plot(x1['대여일자'], x1['이동거리(km)'],label="이동거리(km)")
plt.plot(x1['대여일자'], x1['사용시간(hour)'],label="사용시간(hour)")
plt.xticks(rotation=45)
plt.grid(False)
plt.legend()
plt.title("사용시간, 이동거리, 운동량, 탄소량")
plt.show()
# #### EDA of usage volume and usage time by rental station
# ##### Plot the station distribution on an actual map (see the sketch after the station table below)
# - Latitude and longitude data are essential for plotting on a real map
# In[4]:
import requests
# import pprint
from pandas.io.json import json_normalize
#-*- coding:utf-8 -*-
url = "http://openapi.seoul.go.kr:8088/43797267456268633130315757616b41/json/bikeList/1/1000/"
payload = {}
headers= {}
response = requests.request("GET", url, headers=headers, data = payload)
# print(response.text.encode('utf-8'))
# pprint.pprint(response.json())
json_object = response.json()
json_object['rentBikeStatus']['row']
df = json_normalize(json_object['rentBikeStatus']['row'])
df
# In[1]:
#-*- coding:utf-8 -*-
url = "http://openapi.seoul.go.kr:8088/43797267456268633130315757616b41/json/bikeList/1001/2000/"
payload = {}
headers= {}
response = requests.request("GET", url, headers=headers, data = payload)
# print(response.text.encode('utf-8'))
# pprint.pprint(response.json())
json_object = response.json()
json_object['rentBikeStatus']['row']
df2 = json_normalize(json_object['rentBikeStatus']['row'])
df2
# In[122]:
df = pd.concat([df,df2]).reset_index(drop=True)
df.info()
df
# In[123]:
df_map = df[['stationName','stationLatitude','stationLongitude']]
df_map[['대여 대여소번호', '대여소명']] = df_map['stationName'].str.split('.', n=1, expand=True)
df_map
df_map['stationLatitude'] = df_map['stationLatitude'].astype(float)
df_map['stationLongitude'] = df_map['stationLongitude'].astype(float)
df_map['대여 대여소번호'] = df_map['대여 대여소번호'].astype(int)
df_map.info()
df_map
# df_map1['stationName'] = df_map1['stationName'].str.replace(pat=r'[0-9.]', repl = r'', regex=True)
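# In[ ]:
# Station distribution on an actual map, as promised above. This is a sketch
# that assumes the folium package is available (it is not used elsewhere in
# this notebook), so the import is guarded.
try:
    import folium
    station_map = folium.Map(location=[37.5665, 126.9780], zoom_start=12)  # Seoul city centre
    for _, row in df_map.iterrows():
        folium.CircleMarker(
            location=[row['stationLatitude'], row['stationLongitude']],
            radius=2, fill=True
        ).add_to(station_map)
    station_map
except ImportError:
    pass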
# #### Merge station latitude/longitude data with per-station rental counts
# - Merge the two separate datasets on the rental station key
# In[124]:
xxx1 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트데이터/무제 폴더/서울특별시 공공자전거 대여정보_201910_1.csv')
xxx1.columns = ['자전거번호','대여일시','대여 대여소번호','stationName','대여거치대','반납일시','반납대여소번호','반납대여소명','반납거치대','이용시간','이용거리']
xxx2 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트데이터/무제 폴더/서울특별시 공공자전거 대여정보_201910_2.csv')
import requests
import base64
import datetime
from urllib.parse import urlencode
import pandas as pd
from IPython.display import Image
from IPython.core.display import HTML
def main(client_id, client_secret, artist, lookup_id=None, playlist_id=None, fields="", query=None, search_type='artist'):
access_token = auth(client_id, client_secret)
search = search_spotify(at=access_token , query=query, search_type=search_type)
artist_id = get_artist_id(access_token, artist=artist)
albums_ids = get_list_of_albums(lookup_id=lookup_id, artist=artist, at=access_token)
album_info_list, albums_json = album_information(list_of_albums=albums_ids, at=access_token)
artists_in_ablums_= get_multiple_artists_from_albums(albums_json= albums_json)
list_of_songs_, list_of_songs_tolist= songs_information(albums_json= albums_json)
artists_in_albums_, songs_json, artist_id_, songs_id_= artists_from_songs(list_of_songs_ids=list_of_songs_tolist ,at=access_token)
artist_list_df= multiple_artists_songs(list_of_artists_ids= artist_id_, at=access_token)
    # renamed from song_features to avoid shadowing the song_features() helper
    song_features_df, songs_features_json = song_features(list_of_songs_ids=songs_id_, at=access_token)
play_list_json_V2, empty_list_one_V2= playlist_data(at=access_token, playlist_id=playlist_id, fields=fields)
def auth(client_id=None, client_secret=None):
    base_url = "https://accounts.spotify.com/api/token"
    # fall back to interactive entry when credentials are not supplied, so the
    # call from main(client_id, client_secret) also works
    if client_id is None:
        client_id = input("client_id: ")
    if client_secret is None:
        client_secret = input("client_secret: ")
credentials = f"{client_id}:{client_secret}"
b64_credentials = base64.b64encode(credentials.encode())
data_for_token = {"grant_type": "client_credentials"}
headers_for_token = {"Authorization": f"Basic {b64_credentials.decode()}"}
access_token = requests.post(base_url, data=data_for_token, headers=headers_for_token).json()
return access_token
def search_spotify(at, query=None, search_type='artist'):
"""
Search_Type Options: album , artist, playlist, track, show and episode
"""
endpoint = "https://api.spotify.com/v1/search"
headers = { "Authorization": f"Bearer {at['access_token']}" }
data = urlencode({"q": query, "type": search_type.lower()})
lookup_url = f"{endpoint}?{data}"
search = requests.get(lookup_url, headers=headers).json()
return search
def get_artist_id(access_token, artist):
endpoint = "https://api.spotify.com/v1/search"
headers = { "Authorization": f"Bearer {access_token['access_token']}" }
data = urlencode({"q": artist, "type": "artist"})
lookup_url = f"{endpoint}?{data}"
artist_id = requests.get(lookup_url, headers=headers).json()["artists"]["items"][0]["id"]
return artist_id
def get_list_of_albums(lookup_id, at, artist=None, resource_type='albums', versions='v1', market="US"):
if lookup_id == None:
lookup_id = get_artist_id(at, artist=artist)
dataV1 = urlencode({"market": market})
endpoint = f"https://api.spotify.com/{versions}/artists/{lookup_id}/{resource_type}?{dataV1}"
headers = { "Authorization": f"Bearer {at['access_token']}" }
album_json = requests.get(endpoint, headers=headers).json()
album_df=[]
for albums in range(len(album_json["items"])):
album_df.append({
"album_id":album_json["items"][albums]["id"],
"artist_id":album_json["items"][0]["artists"][0]["id"]
})
    albums_ids = pd.DataFrame(album_df)
import numpy as np, pandas as pd
import collections, pickle, os
from glob import glob
from collections import OrderedDict
from astropy.io import fits
from astrobase.lcmath import time_bin_magseries_with_errs
from cdips.lcproc.mask_orbit_edges import mask_orbit_start_and_end
from cdips.plotting.vetting_pdf import _given_mag_get_flux
from timmy.paths import DATADIR, RESULTSDIR
from numpy import array as nparr
from scipy.stats import gaussian_kde
def detrend_tessphot(x_obs, y_obs, y_err):
from wotan import flatten
flat_flux, trend_flux = flatten(x_obs, y_obs, method='hspline',
window_length=0.3,
break_tolerance=0.4, return_trend=True)
# flat_flux, trend_flux = flatten(time, flux, method='pspline',
# break_tolerance=0.4, return_trend=True)
# flat_flux, trend_flux = flatten(time, flux, method='biweight',
# window_length=0.3, edge_cutoff=0.5,
# break_tolerance=0.4, return_trend=True,
# cval=2.0)
return flat_flux, trend_flux
def get_tessphot(provenance, yval):
"""
provenance: 'spoc' or 'cdips'
yval:
spoc: 'SAP_FLUX', 'PDCSAP_FLUX'
cdips: 'PCA1', 'IRM1', etc.
"""
if provenance == 'spoc':
lcpaths = glob(os.path.join(
DATADIR, 'MAST_2020-05-04T1852/TESS/*/*-s_lc.fits'))
assert len(lcpaths) == 2
elif provenance == 'cdips':
lcpaths = glob(os.path.join(
DATADIR, 'MAST_2020-05-04T1852/HLSP/*/*cdips*llc.fits'))
assert len(lcpaths) == 2
else:
raise NotImplementedError
time, flux, flux_err, qual = [], [], [], []
for l in lcpaths:
hdul = fits.open(l)
d = hdul[1].data
if provenance == 'spoc':
time.append(d['TIME'])
_f, _f_err = d[yval], d[yval+'_ERR']
flux.append(_f/np.nanmedian(_f))
flux_err.append(_f_err/np.nanmedian(_f))
qual.append(d['QUALITY'])
elif provenance == 'cdips':
time.append(d['TMID_BJD'] - 2457000)
_f, _f_err = _given_mag_get_flux(d[yval], err_mag=d['IRE'+yval[-1]])
flux.append(_f)
flux_err.append(_f_err)
hdul.close()
time = np.concatenate(time).ravel()
flux = np.concatenate(flux).ravel()
flux_err = np.concatenate(flux_err).ravel()
if len(qual)>0:
qual = np.concatenate(qual).ravel()
return time, flux, flux_err, qual
def get_clean_tessphot(provenance, yval, binsize=None, maskflares=0):
"""
    Get the data and apply the quality-flag mask.
    Optionally bin the light curve to speed up fitting (fit time scales roughly
    linearly with the number of points, but the final fit should still use the
    2-minute cadence).
"""
time, flux, flux_err, qual = get_tessphot(provenance, yval)
N_i = len(time) # initial
if provenance == 'spoc':
# [ 0, 1, 8, 16, 32, 128, 160, 168, 176, 180, 181,
# 512, 2048, 2080, 2176, 2216, 2560]
binary_repr_vec = np.vectorize(np.binary_repr)
qual_binary = binary_repr_vec(qual, width=12)
# See Table 28 of EXP-TESS-ARC-ICD-TM-0014
# don't want:
# bit 3 coarse point
# bit 4 Earth point
# bit 6 reaction wheel desaturation
        # bit 8 manual exclude
# bit 11 cosmic ray detected on collateral pixel row or column
# bit 12 straylight from earth or moon in camera fov
# badbits = [3,4,6,8,11]
badbits = [3,4,6,8,11,12]
sel = np.isfinite(qual)
for bb in badbits:
# note zero vs one-based count here to convert bitwise flags to
# python flags
sel &= ~(np.array([q[bb-1] for q in qual_binary]).astype(bool))
time, flux, flux_err = time[sel], flux[sel], flux_err[sel]
inds = np.argsort(time)
time, flux, flux_err = time[inds], flux[inds], flux_err[inds]
N_ii = len(time) # after quality cut
# finite times, fluxes, flux errors.
sel = np.isfinite(time) & np.isfinite(flux) & np.isfinite(flux_err)
time, flux, flux_err = time[sel], flux[sel], flux_err[sel]
N_iii = len(time)
# time, flux, sel = mask_orbit_start_and_end(time, flux, orbitgap=0.5,
# expected_norbits=2,
# orbitpadding=6/(24),
# raise_expectation_error=True,
# return_inds=True)
# flux_err = flux_err[sel]
# N_iii = len(time) # after orbit edge masking
if maskflares:
t_offset = np.nanmin(time)
FLARETIMES = [
(4.60+t_offset, 4.63+t_offset),
(37.533+t_offset, 37.62+t_offset)
]
flaresel = np.zeros_like(time).astype(bool)
for ft in FLARETIMES:
flaresel |= ( (time > min(ft)) & (time < max(ft)) )
time, flux, flux_err = (
time[~flaresel], flux[~flaresel], flux_err[~flaresel]
)
N_iv = len(time)
x_obs = time
y_obs = (flux / np.nanmedian(flux))
y_err = flux_err / np.nanmedian(flux)
print(42*'-')
print('N initial: {}'.
format(N_i))
print('N after quality cut: {}'.
format(N_ii))
print('N after quality cut + finite masking: {}'.
format(N_iii))
if maskflares:
print('N after quality cut + finite masking + flare masking: {}'.
format(N_iv))
print(42*'-')
if isinstance(binsize, int):
bd = time_bin_magseries_with_errs(
x_obs, y_obs, y_err, binsize=binsize, minbinelems=5
)
x_obs = bd['binnedtimes']
y_obs = bd['binnedmags']
# assume errors scale as sqrt(N)
original_cadence = 120
y_err = bd['binnederrs'] / (binsize/original_cadence)**(1/2)
assert len(x_obs) == len(y_obs) == len(y_err)
return (
x_obs.astype(np.float64),
y_obs.astype(np.float64),
y_err.astype(np.float64)
)
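# Example call (sketch only): fetch the SPOC PDCSAP photometry, mask flares and
# bin the light curve. The binsize is illustrative and assumed to be in seconds,
# matching the 120 s native cadence used above.
# x_obs, y_obs, y_err = get_clean_tessphot('spoc', 'PDCSAP_FLUX', binsize=600,
#                                          maskflares=1)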
def get_elsauce_phot(datestr=None):
"""
get ground-based photometry from Phil Evans.
2020-04-01: R_c
2020-04-26: R_c
2020-05-21: I_c
2020-06-14: B_j
"""
if datestr is None:
lcglob = os.path.join(RESULTSDIR, 'groundphot', 'externalreduc',
'bestkaren', 'to_fit', '*.dat')
lcpaths = glob(lcglob)
assert len(lcpaths) == 4
else:
lcglob = os.path.join(RESULTSDIR, 'groundphot', 'externalreduc',
'bestkaren', 'to_fit', f'TIC*{datestr}*.dat')
lcpaths = glob(lcglob)
assert len(lcpaths) == 1
time, flux, flux_err = [], [], []
for l in lcpaths:
df = pd.read_csv(l, delim_whitespace=True)
time.append(nparr(df['BJD_TDB']))
if 'rel_flux_T1_dfn' in df:
flux_k = 'rel_flux_T1_dfn'
flux_err_k = 'rel_flux_err_T1_dfn'
else:
flux_k = 'rel_flux_T1_n'
flux_err_k = 'rel_flux_err_T1_n'
flux.append(nparr(df[flux_k]))
flux_err.append(nparr(df[flux_err_k]))
time = np.concatenate(time).ravel()
flux = np.concatenate(flux).ravel()
flux_err = np.concatenate(flux_err).ravel()
return time, flux, flux_err
def get_astep_phot(datestr=None):
"""
get ground-based photometry from ASTEP400
datestrs = ['20200529', '20200614', '20200623']
"""
if datestr is None:
raise NotImplementedError
else:
lcglob = os.path.join(RESULTSDIR, 'groundphot', 'externalreduc',
'ASTEP_to_fit', f'TIC*{datestr}*.csv')
lcpaths = glob(lcglob)
assert len(lcpaths) == 1
time, flux, flux_err = [], [], []
for l in lcpaths:
        df = pd.read_csv(l)
import argparse
import os
import sys
import h5py
import numpy as np
import pandas as pd
from PIL import Image
from scipy.special import softmax
from sklearn.metrics import confusion_matrix
import torch
from model import Net
from utils import cifar10_loader, DEFAULT_WORKSPACE, get_devices, NUM_MODELS
def main(argv=sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=1000)
parser.add_argument('--workspace', type=str, default=DEFAULT_WORKSPACE)
parser.add_argument('--num-saved-images', type=int, help='Max number of images to save per class.')
devices = get_devices()
parser.add_argument('--device', default='cuda' if 'cuda' in devices else 'cpu', choices=devices)
args = parser.parse_args(argv[1:])
os.makedirs(args.workspace, exist_ok=True)
eval_dir = os.path.join(args.workspace, 'eval')
os.makedirs(eval_dir, exist_ok=True)
model_range = range(NUM_MODELS + 1)
attacked_idx = 0
for model_idx in model_range:
print(f'idx: {model_idx}')
outdir = os.path.join(eval_dir, 'by_model', str(model_idx))
os.makedirs(outdir, exist_ok=True)
net = Net().to(args.device)
net_path = os.path.join(args.workspace, 'networks', f'{model_idx}.pt')
net.load_state_dict(torch.load(net_path, map_location='cpu'))
net.eval()
test_loader = cifar10_loader(args.batch_size, train=False, shuffle=False)
classes = test_loader.dataset.classes
saved_img_counts = [0] * 10
y = []
y_pred = []
y_pred_proba = []
y_repr = []
for batch_idx, (x_batch, y_batch) in enumerate(test_loader):
x_batch, y_batch = x_batch.to(args.device), y_batch.to(args.device)
y.extend(y_batch.tolist())
outputs, representations = net(x_batch, include_penultimate=True)
outputs = outputs.detach().cpu().numpy()
representations = representations.detach().cpu().numpy()
y_pred.extend(outputs.argmax(axis=1))
y_pred_proba.extend(softmax(outputs, axis=1).tolist())
y_repr.extend(representations.tolist())
# Save example images.
if model_idx == 0:
for image_idx, class_ in enumerate(y_batch.tolist()):
if args.num_saved_images is not None and saved_img_counts[class_] >= args.num_saved_images:
continue
img_dir = os.path.join(eval_dir, 'images', f'{class_}_{classes[class_]}')
os.makedirs(img_dir, exist_ok=True)
img_arr = (x_batch[image_idx].detach().cpu().numpy() * 255).round().astype(np.uint8).transpose([1, 2, 0])
img = Image.fromarray(img_arr)
img_id = test_loader.batch_size * batch_idx + image_idx
img.save(os.path.join(img_dir, f'{img_id}.png'))
saved_img_counts[class_] += 1
y = np.array(y)
y_pred = np.array(y_pred)
y_pred_proba = np.array(y_pred_proba)
correct = y_pred == y
y_repr = np.array(y_repr)
np.savetxt(os.path.join(outdir, 'ground_truth.csv'), y, delimiter=',', fmt='%d')
np.savetxt(os.path.join(outdir, 'pred.csv'), y_pred, delimiter=',', fmt='%d')
np.savetxt(os.path.join(outdir, 'pred_proba.csv'), y_pred_proba, delimiter=',', fmt='%f')
np.savetxt(os.path.join(outdir, 'correct.csv'), correct, delimiter=',', fmt='%d')
np.savetxt(os.path.join(outdir, 'representations.csv'), y_repr, delimiter=',', fmt='%f')
# Save transposed representations as HDF5 for quicker parsing/loading later.
with h5py.File(os.path.join(outdir, 'representations.hdf5'), 'w') as f:
f.create_dataset('representations', data=y_repr.T)
cm = confusion_matrix(y, y_pred)
print('Confusion Matrix:')
print(cm)
np.savetxt(os.path.join(outdir, 'confusion.csv'), cm, delimiter=',', fmt='%d')
num_correct = correct.sum()
total = len(y_pred)
accuracy = num_correct / total
eval_dict = {
'correct': [num_correct],
'total': [total],
'accuracy': [accuracy]
}
        eval_df = pd.DataFrame.from_dict(eval_dict)
from tensorflow.python.ops.functional_ops import While
import tensorflow as tf
import numpy as np
import pandas as pd
import waktu as wk
import time
from datetime import datetime
from datetime import date
import schedule
import pyrebase
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from tensorflow import keras
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.python.eager.context import num_gpus
from os import read, stat_result
from re import T, X
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
dbStore = firestore.client()
def cekHari():
cekNow = date.today().strftime("%A")
if cekNow == 'Monday':
now = 0
elif cekNow == 'Tuesday':
now = 1
elif cekNow == 'Wednesday':
now = 2
elif cekNow == 'Thursday':
now = 3
elif cekNow == 'Friday':
now = 4
elif cekNow == 'Saturday':
now = 5
elif cekNow == 'Sunday':
now = 6
return now
config = {
"apiKey": "<KEY>",
"authDomain": "cloudta2021-fa4af.firebaseapp.com",
"databaseURL": "https://cloudta2021-fa4af-default-rtdb.firebaseio.com",
"storageBucket": "cloudta2021-fa4af.appspot.com"
}
DataHead = "Dataset Hasil Pengujian"
firebase = pyrebase.initialize_app(config)
db = firebase.database()
timeNow = datetime.now()
jam = timeNow.hour
menit = timeNow.minute
timestamp = timeNow.strftime("%H:%M")
day = date.today().strftime("%A")
idrelay = [0, 1, 2, 3]
hari = cekHari()
waktu = wk.cekWaktu(jam, menit)
# waktu = 120
# hari = 0
data = pd.read_csv('FixDataBind.csv')
data = pd.DataFrame(data, columns=['waktu', 'hari', 'idrelay', 'status'])
data['waktu'] = pd.factorize(data['waktu'])[0]
data['hari'] = pd.factorize(data['hari'])[0]
data['idrelay'] = pd.factorize(data['idrelay'])[0]
data['status'] = pd.factorize(data['status'])[0]
x = data.iloc[:, 0:3].values
y = data.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.1, random_state=0)
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
# reuse the scaler fitted on the training set; refitting on the test set leaks information
x_test = sc.transform(x_test)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=3, activation='relu'))
model.add(tf.keras.layers.Dense(units=26, activation='relu'))
model.add(tf.keras.layers.Dense(units=8, activation='relu'))
model.add(tf.keras.layers.Dense(units=2, activation='sigmoid'))
start = time.perf_counter()
opt = keras.optimizers.Adam(learning_rate=0.02)
model.compile(optimizer=opt,
loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=1100, batch_size=256)
print(model.layers[0].weights)
# print(model.layers[0].bias.numpy())
print(model.layers[1].weights)
# print(model.layers[1].bias.numpy())
print(model.layers[2].weights)
# print(model.layers[2].bias.numpy())
print(model.layers[3].weights)
# print(model.layers[3].bias.numpy())
xData = model.evaluate(x_test, y_test, batch_size=256)
elapsed = time.perf_counter() - start
akurasi = float(xData[1])
error = float(xData[0])
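# The confusion matrix and classification report imported above are not used up
# to this point; a short evaluation sketch on the held-out split:
y_pred = np.argmax(model.predict(x_test), axis=1)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))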
df = pd.read_csv('FixDataBind.csv')
import copy
import json
import jsonschema
import logging
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import minst.utils as utils
logger = logging.getLogger(__name__)
class MissingDataException(Exception):
pass
class Observation(object):
"""Document model each item in the collection."""
# This should use package resources :o(
SCHEMA_PATH = os.path.join(os.path.dirname(__file__), 'schema',
'observation.json')
SCHEMA = json.load(open(SCHEMA_PATH))
def __init__(self, index, dataset, audio_file, instrument, source_index,
start_time, duration, note_number=None, dynamic='',
partition=''):
"""Model definition for an instrument observation.
Parameters
----------
index :
dataset :
audio_file : str
Relative file path to an audiofile.
instrument :
source_index :
start_time :
duration :
note_number :
dynamic :
partition :
Returns
-------
obs : Observation
Populated observation
"""
self.index = index
self.dataset = dataset
self.audio_file = audio_file
self.instrument = instrument
self.source_index = source_index
self.start_time = start_time
self.duration = duration
self.note_number = note_number
self.dynamic = dynamic
self.partition = partition
def to_builtin(self):
return self.__dict__.copy()
@classmethod
def from_series(cls, series):
"""Convert a pd.Series to an Observation."""
return cls(index=series.name, **series.to_dict())
def to_series(self):
"""Convert to a flat series (ie make features a column)
Returns
-------
pandas.Series
"""
flat_dict = self.to_dict()
name = flat_dict.pop("index")
return pd.Series(data=flat_dict, name=name)
def to_dict(self):
return self.__dict__.copy()
def __getitem__(self, key):
return self.__dict__[key]
def validate(self, schema=None, verbose=False, check_files=True):
"""Returns True if valid.
"""
schema = self.SCHEMA if schema is None else schema
success = True
try:
jsonschema.validate(self.to_builtin(), schema)
except jsonschema.ValidationError as derp:
success = False
if verbose:
print("Failed schema test: \n{}".format(derp))
if success and check_files:
success &= utils.check_audio_file(self.audio_file)[0]
if not success and verbose:
print("Failed file check: \n{}".format(self.audio_file))
return success
def _enforce_obs(obs, audio_root='', strict=True):
"""Get dict from an Observation if an observation, else just dict"""
audio_file = obs['audio_file']
escaped_audio_file = os.path.join(audio_root, audio_file)
file_checks = [os.path.exists(audio_file),
os.path.exists(escaped_audio_file)]
if not any(file_checks) and strict:
raise MissingDataException(
"Audio file(s) missing:\n\tbase: {}\n\tescaped:{}"
"".format(audio_file, escaped_audio_file))
if isinstance(obs, Observation):
obs = obs.to_dict()
obs['audio_file'] = escaped_audio_file if file_checks[1] else audio_file
return obs
class Collection(object):
"""Dictionary-like collection of Observations (maintains order).
Expands relative audio files to a given `audio_root` path.
"""
# MODEL = Observation
def __init__(self, observations, audio_root='', strict=False):
"""
Parameters
----------
observations : list
List of Observations (as dicts or Observations.)
If they're dicts, this will convert them to Observations.
data_root : str or None
Path to look for an observation, if not None
"""
self._observations = [Observation(**_enforce_obs(x, audio_root,
strict))
for x in observations]
self.audio_root = audio_root
self.strict = strict
def __eq__(self, a):
is_eq = False
if hasattr(a, 'to_builtin'):
is_eq = self.to_builtin() == a.to_builtin()
return is_eq
def __len__(self):
return len(self.values())
def __getitem__(self, n):
"""Return the observation for a given integer index."""
return self._observations[n]
def items(self):
return [(v.index, v) for v in self.values()]
def values(self):
return self._observations
def keys(self):
return [v.index for v in self.values()]
def append(self, observation, audio_root=None):
audio_root = self.audio_root if audio_root is None else audio_root
obs = _enforce_obs(observation, audio_root, self.strict)
self._observations += [Observation(**obs)]
def to_builtin(self):
return [v.to_builtin() for v in self.values()]
@classmethod
def read_json(cls, json_path, audio_root=''):
with open(json_path, 'r') as fh:
return cls(json.load(fh), audio_root=audio_root)
def to_json(self, json_path=None, **kwargs):
"""Pandas-like `to_json` method.
Parameters
----------
json_path : str, or None
If given, will attempt to write JSON to disk; else returns a string
of serialized data.
**kwargs : keyword args
Pass-through parameters to the JSON serializer.
"""
sdata = json.dumps(self.to_builtin(), **kwargs)
if json_path is not None:
with open(json_path, 'w') as fh:
fh.write(sdata)
else:
return sdata
def validate(self, verbose=False, check_files=True):
"""Returns True if all are valid."""
return all([x.validate(verbose=verbose, check_files=check_files)
for x in self.values()])
def to_dataframe(self):
return pd.DataFrame([x.to_series() for x in self.values()])
@classmethod
def from_dataframe(cls, dframe, audio_root=''):
return cls([Observation.from_series(x) for _, x in dframe.iterrows()],
audio_root=audio_root)
def copy(self, deep=True):
return Collection(copy.deepcopy(self._observations))
def view(self, column, filter_value):
"""Returns a copy of the collection restricted to the filter value.
Parameters
----------
column : str
Name of the column for filtering.
filter_value : obj
Value to restrict the collection.
Returns
-------
"""
thecopy = copy.copy(self.to_dataframe())
ds_view = thecopy[thecopy[column] == filter_value]
return Collection.from_dataframe(ds_view, self.audio_root)
def load(filename, audio_root):
"""
"""
return Collection.load(filename)
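# Example usage (sketch; the JSON path, audio root and instrument value below
# are placeholders only):
# collection = Collection.read_json("observations.json", audio_root="/data/audio")
# df = collection.to_dataframe()
# trumpets = collection.view(column="instrument", filter_value="trumpet")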
def partition_collection(collection, test_set, train_val_split=0.2,
max_files_per_class=None):
"""Returns Datasets for train and validation constructed
from the datasets not in the test_set, and split with
the ratio train_val_split.
* First selects from only the datasets given in datasets.
* Then **for each instrument** (so the distribution from
each instrument doesn't change)
* train_test_split to generate training and validation sets.
* if max_files_per_class, also then restrict the training set to
a maximum of that number of files for each train and test
Parameters
----------
test_set : str
String in ["rwc", "uiowa", "philharmonia"] which selects
the hold-out-set to be used for testing.
Returns
-------
partition_df : pd.DataFrame
DataFrame with only an index to the original table, and
the partiition in ['train', 'valid', 'test']
"""
df = collection.to_dataframe()
test_df = collection.view(
column='dataset', filter_value=test_set).to_dataframe()
datasets = set(df["dataset"].unique()) - set([test_set])
search_df = df[df["dataset"].isin(datasets)]
selected_instruments_train = []
selected_instruments_valid = []
for instrument in search_df["instrument"].unique():
instrument_df = search_df[search_df["instrument"] == instrument]
if len(instrument_df) < 2:
logger.warning("Instrument {} doesn't haven enough samples "
"to split.".format(instrument))
continue
groups = instrument_df.groupby(['source_index'])
train_grps, valid_grps = train_test_split(
list(groups), test_size=train_val_split)
# Groups get backed out as (source_index, dataframe) tuples, so stick
# these back together now that they've been partitioned.
        traindf = pd.concat(x[1] for x in train_grps)
#########################################################################
#########################################################################
# Classes for handling genome-wide association input and output files, ##
# analysis and qc programs, and post-hoc analyses ##
#########################################################################
#########################################################################
import cgatcore.experiment as E
import cgatcore.iotools as iotools
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
import re
import random
import os
import subprocess
import rpy2.robjects as ro
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri as py2ri
# set matplotlib non-interactive backend to Agg to
# allow running on cluster
import matplotlib
matplotlib.use("Agg")
import collections
import sqlite3 as sql
from math import *
import scipy.stats as stats
class FileGroup(object):
'''
An object for holding, formatting and processing files for genome-wide
association analysis including compressed and binary files
File types supported:
* plink - .ped and .map files
* plink binary - .bim, .fam. and .bed files
* variant call format - .vcf and .bcf (including gzipped vcf)
* Oxford format - .gen or .bgen with matched sample text file (must
be .sample)
* GRM_binary - genetic relationship matrix calculated in an appropriate
program in binary format. File suffixes are *.grm.bin, *.grm.N.bin
      and *.grm.id
    * GRM_gz - previously calculated gzip-compressed GRM, file suffixes
are *.grm.gz and *.grm.id
Phenotypes are assumed to be contained in the relevant files, if not
then an additional phenotypes files can be included using the
`phenotypes` argument. Covariate files (if different from the phenotypes
file) can also be included in the instantiation of a :FileGroup:
object using the `covarite_files` argument.
Only the `files` and `file_format` arguments are required.
Genotype data are assumed to be raw genotype calls. This can be modified
using the `genotype_format` argument upon instantiation. Values allowed
are:
* calls - standard bi-allelic genotype calls, i.e. AA, AB, BB
* imputed_call - discrete genotype calls from imputed data,
essentially treated the same as ``calls``
* genotype_prob - posterior probabilities for each genotype class,
i.e. 0.88 0.07 0.05 corresponding to homozygote
reference, heterozygote then homozygote rare allele.
'''
# Defaults for file formats
ped_file = None
map_file = None
bim_file = None
fam_file = None
bed_file = None
sample_file = None
gen_file = None
bgen_file = None
vcf_file = None
bcf_file = None
def __init__(self, files, file_format, phenotypes=None,
genotype_format="calls", covariate_files=None):
self.files = files
self.file_format = file_format
self.pheno_file = phenotypes
self.genotype_format = genotype_format
self.covariate_files = covariate_files
self.set_file_prefix(files)
def set_file_prefix(self, infiles):
'''Get file prefixes from input files. These are used across all
file formats, e.g. myfile.bed, myfile.bim, myfile.fam name=myfile.
Only use periods, '.' to denote file suffixes. use hyphens and
underscores for separating file names.
Set these to the appropriate attributes.
'''
file_prefixes = set()
for f in infiles:
# get all input file prefixes
if len(f.split("/")) > 1:
g = f.split("/")[-1]
fdir = f.split("/")[:-1]
fdir = "/".join(fdir)
ffile = fdir + "/" + g.split(".")[0]
file_prefixes.add(ffile)
else:
file_prefixes.add(f.split(".")[0])
# if only prefix then use this for all data files
if len(file_prefixes) == 1:
self.name = [xf for xf in file_prefixes][0]
else:
# if there are multiple prefixes then use separate
# flags for file inputs
self.name = None
# define file types by their suffix instead
if self.file_format == "plink":
self.ped_file = [pf for pf in infiles if re.search(".ped",
pf)][0]
self.map_file = [mf for mf in infiles if re.search(".map",
mf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.ped_file
except AssertionError:
raise ValueError(".ped file is missing, please "
"specify")
try:
assert self.map_file
except AssertionError:
raise ValueError(".map file is missing, please "
"specify")
elif self.file_format == "plink_binary":
self.fam_file = [ff for ff in infiles if re.search(".fam",
ff)][0]
self.bim_file = [fb for fb in infiles if re.search(".bim",
fb)][0]
self.bed_file = [bf for bf in infiles if re.search(".bed",
bf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.fam_file
except AssertionError:
raise ValueError(".fam file is missing, please "
"specify")
try:
assert self.bim_file
except AssertionError:
raise ValueError(".bim file is missing, please "
"specify")
try:
assert self.bed_file
except AssertionError:
raise ValueError(".bed file is missing, please "
"specify")
elif self.file_format == "oxford":
self.gen_file = [gf for gf in infiles if re.search(".gen",
gf)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.gen_file
except AssertionError:
raise ValueError(".gen file missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file missing, please "
"specify")
elif self.file_format == "oxford_binary":
self.bgen_file = [bg for bg in infiles if re.search(".bgen",
bg)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bgen_file
except AssertionError:
raise ValueError(".bgen file is missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file is missing, please "
"specify")
elif self.file_format == "vcf":
self.vcf_file = [vf for vf in infiles if re.search(".vcf",
vf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.vcf_file
except AssertionError:
raise ValueError(".vcf file is missing, please "
"specify")
elif self.file_format == "bcf":
self.bcf_file = [bv for bv in infiles if re.search(".bcf",
bv)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bcf_file
except AssertionError:
raise ValueError(".bcf file is missing, please "
"specify")
elif self.file_format == "GRM_binary":
self.id_file = [ig for ig in infiles if re.search(".grm.id",
ig)][0]
self.n_file = [gn for gn in infiles if re.search(".grm.N.bin",
gn)][0]
self.bin_file = [gb for gb in infiles if re.search(".grm.bin",
gb)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.n_file
except AssertionError:
raise ValueError("grm.N file is missing, please "
"specify")
try:
assert self.bin_file
except AssertionError:
                raise ValueError("GRM genotype file is missing, please "
                                 "specify")
elif self.file_format == "GRM_plink":
self.id_file = [ig for ig in infiles if re.search(".rel.id",
ig)][0]
self.rel_file = [gn for gn in infiles if re.search(".rel.N.bin",
gn)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.rel_file
except AssertionError:
raise ValueError("rel.N file is missing, please "
"specify")
def set_phenotype(self, pheno_file=None, pheno=1):
'''
Set the phenotype for a set of individuals
using an external phenotypes file.
Default is to use the (n+2)th column, designated
as pheno 1.
'''
if type(pheno) == int:
pheno = str(pheno)
elif type(pheno) == str:
pass
else:
raise AttributeError("Type of pheno unknown. "
"Must be str or int.")
self.pheno_file = pheno_file
self.pheno = pheno
class GWASProgram(object):
'''
A base level object for programs designed to perform genome-wide
association analysis and operate on genome-wide genotyping data.
[INSERT PROPER DOCSTRING - see style guide]
'''
def __init__(self, executable=None, required_format=None):
self.executable = executable
self.require_format = required_format
def program_call(self, infiles, outfile):
'''build a statement to perform genome-wide
analysis using infiles
'''
return ""
def postprocess(self, infiles, outfile):
'''collect and process output files from
program - format for Result class objects'''
return ""
def build(self, infiles, outfile):
'''run analysis program'''
        cmd_program = self.program_call(infiles, outfile)
cmd_postprocess = self.postprocess(infiles, outfile)
if cmd_postprocess:
            assert cmd_postprocess.strip().endswith(";")
else:
pass
statement = " checkpoint; ".join((cmd_program,
cmd_postprocess))
return statement
class GCTA(GWASProgram):
'''
GCTA is designed for computing genetic relationship matrices, linear
mixed model analyses and phenotype estimation/prediction.
It can also perform SNP-wise GWAS.
Files MUST be in Plink binary format
'''
def __init__(self, files, options=None, settings=None,
design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "gcta64"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
'''build GCTA call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
raise AttributeError("Files must be in binary plink format "
"or as a GRM to use GCTA. Please "
"convert and try again.")
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
self.statement["program"] = " ".join(statement)
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_binary" or file_format == "GRM_plink":
statement = " --grm %s " % infiles.name
else:
raise AttributeError("file format is not defined or recognised."
"Please define the input corectly when "
"instantiating a FileGroup object")
return statement
def PCA(self, n_pcs="20"):
'''
Perform PCA analysis on previosly generated GRM, output the number n
principal componets, default = 20
'''
self._run_tasks(pca=n_pcs)
def apply_filters(self, filter_type, filter_value):
'''
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome_number - for non-human species, the number of chromosomes to
be considered autosomes
* exclude_snps - text file list of variant IDs to exclude from analysis
[file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
'''
if filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "autosome_number":
self._construct_filters(autosome_number=filter_value)
elif filter_type == "exclude_snps":
self._construct_filters(exclude_snps=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
def _construct_filters(self, **kwargs):
'''
Add filter to each GCTA run.
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome - exclude all non-place and non-autosomal variants.
[boolean]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
* update_gender - provide gender information in a separate text
file. [file]
* grm_threshold - remove one of a pair of individuals with
estimated relatedness greater than this value.
* ld_significance - p-value threshold for regression test
of LD significance
* genotype_call - GenCall score cut-off for calling raw
genotypes into Plink PED format
* meta_pval - p-value threshold cut-off for conditional
and joint genome-wide analysis
        * cojo_window - distance in kb beyond which SNPs are
          assumed to be in linkage equilibrium
* cojo_collinear - multiple regression R^2 on selected SNPs
value above which the testing SNP will not be selected.
* cojo_inflation - adjust COJO analysis test statistics
for genomic control. [boolean]
* reml_iterations - maximum number of iterations to use
during reml analysis. Default is 100. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"autosome": " --autosome ",
"autosome_number": " --autosome-num %s ",
"grm_threshold": " --grm-cutoff %s ",
"ld_significance": " --ls-sig %s ",
"genotype_call": " --gencall %s ",
"meta_pval": " --cojo-p %s ",
"cojo_window": " --cojo-wind %s ",
"cojo_collinear": " --cojo-collinear %s ",
"cojo_inflation": " --cojo-gc ",
"reml_iterations": " --reml-maxit %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == 'bool':
filters.append(filter_map[each])
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def mixed_model(self, lmm_method, grm=None, qcovar=None,
dcovar=None):
'''
Run a linear mixed model with the GRM used to model
        random effects of the estimated genetic relationship
        between individuals
'''
# add the mlm flag to the statement
self._run_tasks(lmm=lmm_method)
# construct the rest of mlm statement
statement = []
if qcovar:
statement.append(" --qcovar %s " % qcovar)
else:
pass
if dcovar:
statement.append(" --covar %s " % dcovar)
else:
pass
        if grm:
            statement.append(" --grm %s " % grm)
        else:
            E.warn("No GRM has been provided; running the mixed model "
                   "without an explicit GRM file")
self.statement["mlm"] = " ".join(statement)
def reml_analysis(self, method, parameters, prevalence=None,
qcovariates=None, discrete_covar=None):
'''
Use REML to estimate the proportion of phenotypic variance
explained by the estimated genetic relationship between
individuals.
Arguments
---------
method: string
GCTA method to use for REML estimation of h2. Includes:
* snpBLUP - calculate the SNP BLUPs from the genotype
data and the estimated total genetic value/ breeding value
* fixed_cor -
* priors - provide initial priors for the variance components
estimation
* unconstrained - allow variance estimates to fall outside
of the normal parameter space, bounded [0, ).
* GxE - estimate the contribution of GxE with covariates
to the phenotype variance
* BLUP_EBV - output individual total genetic effect/breeding
values
'''
statement = []
try:
params = parameters.split(",")
if len(params) == 1:
params = params[0]
else:
pass
except AttributeError:
params = parameters
self._run_tasks(parameter=params,
greml=method)
if prevalence:
statement.append(" --prevalence %0.3f " % prevalence)
else:
pass
if qcovariates:
statement.append(" --qcovar %s " % qcovariates)
else:
pass
if discrete_covar:
statement.append(" --covar %s " % discrete_covar)
else:
pass
self.statement["reml"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
The principal functions of GCTA revolve around GRM estimation
and variance components analysis, such as REML estimation of
        heritability and variance components, BLUP and phenotype prediction.
It can also be used to do PCA and conditional and joint GWAS.
Tasks
-----
* pca - perform principal components analysis on a GRM
* greml - perform restricted maximum likelihood analysis
for estimation of variance components
* estimate_ld - estimate the linkage disequilibrium structure
over the genomic regions specified
* simulate_gwas - simulate genome-wide association data based
on observed genotype data
* cojo - conditional and joint genome-wide association
analysis across SNPs and covariates
* bivariate_reml - perform GREML on two traits, either both
binary, both quantitative or one of each
* lmm - perform a linear mixed model based association analysis
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
# put all of the other tasks as options in the calling function
task_map = {"pca": " --pca %s ",
"greml": {"standard": " --reml ",
"priors": " --reml --reml-priors %s ",
"reml_algorithm": " --reml --reml-alg %s ",
"unconstrained": " --reml --reml-no-constrain ",
"GxE": " --reml --gxe %s ",
"LRT": " --reml --reml-lrt %s ",
"BLUP_EBV": " --reml --reml-pred-rand ",
"snpBLUP": " --blup-snp %s "},
"estimate_ld": " --ld %s ",
"simulate_gwas": {"quantitative": " --simu-qt ",
"case_control": " --simu-cc %s %s "},
"cojo": {"stepwise": " --cojo-file %s --cojo-slct ",
"no_selection": " --cojo-file %s --cojo-joint ",
"snp_conditional": " --cojo-file %s --cojo-cond %s "},
"bivariate_reml": {"standard": " --reml-bivar %s ",
"no_residual": " --reml-bivar %s --reml-bivar-nocove ",
"fixed_cor": " --reml-bivar %s --reml-bivar-lrt-rg %s "},
"lmm": {"standard": " --mlma ",
"loco": " --mlma-loco ",
"no_covar": " --mlma-no-adj-covar "},
"remove_relations": {"cutoff": " --grm-cutoff %s "}}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
                    statement.append(task_map[task])
# LD estimation is likewise not nested
elif task == "estimate_ld":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
raise IOError("no SNP file list detected")
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("% Task not recognised, see docs for details of "
"recognised tasks" % task)
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
self.statement["tasks"] = " ".join(statement)
def genetic_relationship_matrix(self, compression="binary", metric=None,
shape="square", options=None):
'''
Calculate the estimated genetic relationship matrix from
genotyping data
* estimate_grm - estimate the realized genetic relationship
matrix between individuals from genotyping data
'''
mapf = {"binary": " --make-grm-bin ",
"gzip": " --make-grm-gz ",
"no_compress": " --make-grm ",
"X_chr": " --make-grm-chr ",
"X_chr_gz": " --make-grm-gz ",
"inbreeding": " --ibc "}
if options == "X_chr":
if compression == "gz":
state = mapf["X_chr_gz"]
else:
state = mapf["X_chr"]
elif options == "inbreding":
state = mapf["inbreeding"]
else:
pass
# check compression is compatible
if compression == "gz":
state = mapf["gzip"]
elif compression == "bin":
state = mapf["binary"]
elif compression is None and not options:
state = mapf["no_compress"]
self.statement["matrix"] = state
def build_statement(self, infiles, outfile, threads=None,
memory=None, parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["mlm"])
except KeyError:
pass
try:
statement.append(self.statement["reml"])
except KeyError:
pass
if threads:
statement.append(" --thread-num %i " % threads)
else:
pass
# add output flag
statement.append(" --out %s " % outfile)
os.system(" ".join(statement))
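# A minimal usage sketch of the classes above (file names, phenotype column and
# output prefix are illustrative placeholders only):
# geno = FileGroup(files=["cohort.bed", "cohort.bim", "cohort.fam"],
#                  file_format="plink_binary")
# geno.set_phenotype(pheno_file="phenotypes.txt", pheno=1)
# gcta = GCTA(geno)
# gcta.program_call(geno, "cohort_grm")
# gcta.apply_filters("min_allele_frequency", "0.01")
# gcta.genetic_relationship_matrix(compression="bin")
# gcta.build_statement(geno, "cohort_grm", threads=4)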
class Plink2(GWASProgram):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
Require Plink v1.9 to be in the users PATH variable as ``plink2`` to
distinguish it from Plink v1.07.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plink2"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
''' build Plink call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self. _build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
inputs = self._build_multiple_file_input(infiles,
infiles.file_format)
statement.append(inputs)
# check for the presence of an additional phenotypes file
try:
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
except AttributeError:
pass
self.statement["program"] = " ".join(statement)
def hamming_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using Hamming distance across all variants
'''
# check shape is compatible
if not shape:
shape = "triangle"
elif shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression)
self.statement["matrix"] = state
def ibs_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise similarity matrix between
individuals using proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genome_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using 1 - proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genetic_relationship_matrix(self, shape, compression, metric,
options=None):
'''
Calculate genomic pair-wise distance matrix between
individuals using proportion of IBS alleles
Requires the use of the Plink2 parallelisation to run with large
cohorts of patients
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
raise ValueError("matrix shape %s not recognised."
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if metric in ["cov", "ibc2", "ibc3"]:
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression, options=metric)
else:
E.info("%s metric not recognised. Running with default Fhat1" % metric)
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression)
self.statement["matrix"] = state
def apply_filters(self, filter_type, filter_value):
'''
        Apply a filter to the Plink run. The recognised filter types are
        listed below and are passed through to ``_construct_filters``.
        A short usage sketch follows this method.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* exclude_snp - exclude this single variant
* exclude_snps - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
        * autosome - exclude all unplaced and non-autosomal variants.
          [boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
'''
if filter_type == "genotype_rate":
self._construct_filters(genotype_rate=filter_value)
elif filter_type == "hwe":
self._construct_filters(hwe=filter_value)
elif filter_type == "missingness":
self._construct_filters(missingness=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "exclude_snp":
self._construct_filters(exclude_snp=filter_value)
elif filter_type == "exclude":
self._construct_filters(exclude=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "exclude_chromosome":
            self._construct_filters(exclude_chromosome=filter_value)
elif filter_type == "autosome":
self._construct_filters(autosome=filter_value)
elif filter_type == "pseudo_autosome":
self._construct_filters(pseudo_autosome=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
elif filter_type == "snp_bp_range":
self._construct_filters(snp_bp_range=filter_value)
elif filter_type == "conditional_snp":
self._construct_filters(conditional_snp=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
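    # Usage sketch (illustrative; assumes a Plink2 instance named ``plink``):
    # successive calls accumulate filters before the statement is built, e.g.
    #     plink.apply_filters("min_allele_frequency", "0.01")
    #     plink.apply_filters("genotype_rate", "0.05")
    # which adds the " --maf 0.01 " and " --geno 0.05 " flags to
    # self.statement["filters"].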
def _build_multiple_file_input(self, infiles, file_format):
'''
internal function only. Use it to construct
the appropriate file input flags
'''
statement = None
if file_format == "oxford":
statement = " --gen %s --sample %s " % (infiles.gen_file,
infiles.sample_file)
elif file_format == "oxford_binary":
statement = " --bgen %s --sample %s " % (infiles.bgen_file,
infiles.sample_file)
elif file_format == "plink":
statement = " --ped %s --map %s " % (infiles.ped_file,
infiles.sample_file)
elif file_format == "plink_binary":
statement = " --bed %s --bim %s --fam %s " % (infiles.bed_file,
infiles.bim_file,
infiles.fam_file)
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.vcf_file
elif file_format == "bcf":
statement = " --bcf %s " % infiles.vcf_file
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
else:
raise AttributeError("file format is not defined. Please "
"define the input file formats when "
"instantiating a FileGroup object")
return statement
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_plink":
statement = " --grm.bin %s " % infiles.name
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.name
else:
raise AttributeError("file format is not defined or recognised."
"Please define the input corectly when "
"instantiating a FileGroup object")
return statement
def _construct_filters(self, **kwargs):
'''
Add filter to each plink run. [data type]
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* missingness - exclude individuals with total genotype missingness
above this value. [float]
* hwe - p-value threshold for excluding SNPs deviating from
Hardy-Weinberg expectations. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* mendelian_error - filter out samples/trios exceeding the error
threshold. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* quality_score_file - vcf file with variants and quality scores. Use
`qual_score_column` and `var_id_col` to specify which columns
correspond to the quality score and variant ID columns.
[file] <int> <int>
* min_qual_score - alters the lower bound of the quality score
threshold; default is 0.[int]
* max_qual_score - sets an upper limit on the quality scores;
default is Inf. [int]
* allow_no_sex - prevents phenotypes set to missing if there is no
gender information. [boolean]
* enforce_sex - force phenotype missing when using --make-bed, --recode
or --write-covar. [boolean]
* subset_filter - filter on a particular subset. Choices are: cases,
controls, males, females, founders, nonfounders. [str]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
        * autosome - exclude all unplaced and non-autosomal variants.
          [boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
* specific_snp - only load the variant specified. [str]
* exclude_snp - exclude this single variant
* window_size - alters behaviour of `specific_snp` and `exclude_snp`
to include/exclude SNPs within +/- half of this distance (kb) are
also included. [float]
* range_resolution - sets the resolution of the (from, to) range.
Either bp, kb or mb. If set it will take the values from
`snp_bp_range`. [str/int/float]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"genotype_rate": " --geno %s ",
"missingness": "--mind %s ",
"hwe": " --hwe %s ",
"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"mendelian_error": " --me %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"quality_score_file": " --qual-scores %s ",
"qual_score_column": " %s ",
"var_id_col": " %s ",
"min_qual_score": " --qual-threshold %s ",
"max_qual_score": " --qual-max-threshold %s ",
"allow_no_sex": " --allow-no-sex ",
"enforce_sex": " --must-have-sex ",
"subset_filter": " --filter-%s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"exclude_chromosome": " --not-chr %s ",
"autosome": " --autosome ",
"pseudo_autosome": " --autosome-xy ",
"ignore_indels": " --snps-only no-DI ",
"snp_id_range": " --from %s --to %s ",
"specific_snp": " --snp %s ",
"window_size": " --window %s ",
"exclude_snp": " --exclude-snp %s ",
"snp_bp_range": "--from-bp %s --to-bp %s ",
"covariates_file": " --filter %s ",
"covariate_filter": " %s ",
"covariate_column": " --mfilter %s ",
"missing_phenotype": " --prune ",
"conditional_snp": " --condition %s ",
"haplotype_size": " --blocks-max-kb %s ",
"haplotype_frequency": " --blocks-min-maf %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.items():
filter_dict[key] = value
# need to check for covariates and qual scores - these
# are more complex. Deal with these first and remove
# from dictionary once complete.
try:
assert filter_dict["quality_score_file"]
assert filter_dict["qual_score_column"]
assert filter_dict["var_id_col"]
quals = []
qual_file = filter_dict["quality_score_file"]
score_col = filter_dict["qual_score_column"]
id_col = filter_dict["var_id_col"]
quals.append(filter_map["quality_score_file"] % qual_file)
quals.append(filter_map["qual_score_column"] % score_col)
quals.append(filter_map["var_id_col"] % id_col)
# remove from dictionary
filter_dict.pop("qual_score_column", None)
filter_dict.pop("var_id_col", None)
filters.append(" ".join(quals))
except KeyError:
pass
try:
assert filter_dict["covariates_file"]
assert filter_dict["covariate_filter"]
covars = []
covar_file = filter_dict["covariates_file"]
covar_val = filter_dict["covariate_filter"]
covars.append(filter_map["covariates_file"] % covar_file)
covars.append(filter_map["covariate_filter"] % covar_val)
            # check to filter on specific column number, default is 3rd file
# column, i.e. (n+2)th column
try:
assert filter_dict["covariate_column"]
covar_col = filter_dict["covariate_column"]
covars.append(filter_map["covariate_column"] % covar_col)
filter_dict.pop("covariate_column", None)
except KeyError:
pass
# remove from dictionary
filter_dict.pop("covariates_file", None)
filter_dict.pop("covariate_filter", None)
filters.append(" ".join(covars))
except KeyError:
pass
# range_resolution and snp_bp_range are used together
try:
assert filter_dict["snp_bp_range"]
flags = filter_map["snp_bp_range"]
from_pos = filter_dict["snp_bp_range"].split(",")[0]
to_pos = filter_dict["snp_bp_range"].split(",")[1]
filters.append(flags % (from_pos, to_pos))
# remove so they are not duplicated - source of bugs
filter_dict.pop("snp_bp_range", None)
except KeyError:
pass
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
# handle multiple arguments in string format
elif len(filter_dict[each].split(",")) > 1:
vals = tuple(filter_dict[each].split(","))
filters.append(filter_map[each] % vals)
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def calc_ld(self, ld_statistic, ld_threshold,
ld_shape="table"):
'''
Calculate linkage disequilibrium between all SNP
pairs.
Arguments
---------
ld_statistic: string
The LD statistic to report, either correlation or squared correlation
of inter-variant allele counts
ld_threshold: float
minimum value to report for pair-wise LD
ld_window: int
max distance (in Kb) between SNPs for calculating LD
ld_shape: string
shape to use for reporting LD, either a table or a matrix. If a
matrix then either square, square with diagnonal (square0) or
triangular. Square matrices are symmetric.
'''
statement = []
ld_map = {"r": " --r %s dprime ",
"r2": "--r2 %s dprime "}
shape_map = {"table": "inter-chr gz",
"square": "square gz",
"square0": "square0 gz",
"triangle": "triangle gz"}
try:
statement.append(ld_map[ld_statistic] % shape_map[ld_shape])
except KeyError:
raise ValueError("%s LD statistic not recognised. Please "
"use eithr 'r' or 'r2'" % ld_statistic)
if type(ld_threshold) == float:
statement.append(" --ld-window-r2 %0.3f " % ld_threshold)
else:
E.warn("threshold type not recognised, setting to default "
"value of 0.2")
self.statement["tasks"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
Plink2 is capable of much more than just running basic association
analyses.
        These include file processing, reformatting, filtering, data summaries,
        PCA, clustering, GRM calculation (slow and memory intensive), etc.
        Multiple tasks can be added by separate calls to this function,
        for instance adding phenotype and gender information using the
        update_samples task whilst changing the file format.
Tasks
-----
* change_format - convert from input format to an alternative format
after applying filters.
* change_missing_values - alters the genotype or phenotype missing
value into the value supplied.
* update_variants - use this to fill in missing variant IDs, useful
for data from exome or whole-genome sequencing that have
non-standard IDs.
* update_samples - update phenotype and sample information
* flip_strands - flip the strand for alleles, swaps A for T and
C for G.
* flip_scan - use the LD-based scan to check SNPs have not had
incorrect strand assignment. Particularly useful if cases and
controls were genotyped separately, or the cohort was genotyped
in different batches.
* sort - sort files by individual and/or family IDs
* merge - merge new filesets with reference fileset.
* merge_mode - handling of missing values and overwriting values
* find_duplicates - find and output duplicate variants based on bp position,
or variant ID. Useful to output for the --exclude filtering flag.
* remove_relations - remove one of a pair of individuals with IBS >=
a threshold. Recommended minimum is 3rd cousins (IBS >= 0.03125).
* check_gender - check imputed gender from non-pseudoautosomal X
          chromosome genotypes against self-reported gender
* estimate_haplotypes - assign SNPs to haplotype blocks and get
positional information
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
task_map = {'change_format': {"plink_binary": " --make-bed ",
"plink": " --recode ",
"oxford": " --recode oxford ",
"oxford_binary": " --recode oxford gen-gz ",
"raw": " --recode A tabx "},
"change_missing_values": {"genotype": " --missing-genotype %s ",
"phenotype": " --missing-phenotype %s "},
"update_variants": {"variant_ids": " --set-missing-var-ids %s ",
"missing_id": " --mising-var-code %s ",
"chromosome": " --update-chr %s ",
"centimorgan": " --update-cm %s ",
"name": " --update-name %s ",
"alleles": " --update-alleles %s ",
"map": " --update-map %s "},
"update_samples": {"sample_ids": " --update-ids %s ",
"parents": " --update-parents %s ",
"gender": " --update-sex %s %s "},
"flip_strands": {"all_samples": " --flip %s ",
"subset": " --flip-subset %s "},
"flip_scan": {"default": " --flip-scan verbose ",
"window": "--flip-scan --flip-scan-window %s ",
"kb": " --flip-scan --flip-scan-window-kb %s ",
"threshold": " --flip-scan --flip-scan-threshold %s "},
"sort": {"none": " --indiv-sort %s ",
"natural": " --indiv-sort %s ",
"ascii": " --indiv-sort %s ",
"file": " --indiv-sort %s "},
"merge": {"plink": " --merge %s ",
"binary_plink": " --bmerge %s "},
"merge_mode": {"default": " --merge-mode 1 ",
"orginal_missing": " --merge-mode 2 ",
"new_nonmissing": " --merge-mode 3 ",
"no_overwrite": " --merge-mode 4 ",
"force": " --merge-mode 5 ",
"report_all": " --merge-mode 6 ",
"report_nonmissing": " --merge-mode 7"},
"find_duplicates": {"same_ref": " --list-duplicate-vars require-same-ref ",
"id_match": " --list-duplicate-vars ids-only ",
"suppress_first": " --list-duplicate-vars suppress-first"},
"remove_relations": {"cutoff": " --rel-cutoff %s "},
"check_gender": " --check-sex ",
"pca": " --pca %s ",
"estimate_haplotypes": " --blocks "}
for task, value in kwargs.items():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
elif task == "check_gender":
statement.append(task_map[task])
elif task == "estimate_haplotypes":
statement.append(task_map[task])
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# gender has two string formats
if value == "gender":
gcol = 1
statement.append(sub_task[value] % (parameter,
gcol))
else:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("No sub task found, see docs for details of "
"recognised tasks")
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
# handle multiple tasks for a single run
try:
curr_tasks = self.statement["tasks"]
new_tasks = " ".join(statement)
self.statement["tasks"] = " ".join([curr_tasks, new_tasks])
except KeyError:
self.statement["tasks"] = " ".join(statement)
def _output_statistics(self, **kwargs):
'''
Summary statistics are written to specific files dictated by the
type of statistic
Statistics
----------
* allele_frequency - writes out MAF to `plink`.frq, this can be
modified with specific keywords.
* missing_data - generates a report of data missingness, can be subset
into within family and/or cluster reports
* hardy_weinberg - calculates all HWE p-values using exact test
statistics. For case/control studies reports are written for case,
controls and combined.
* mendel_errors - generates a Mendelian error report across all trios.
There are 10 different codes responding to different Mendelian error
scenarios.
* inbreeding - calculate observed and expected homozygosity across
individuals and F statistics. If the sample size is small then a
file of MAFs is required. Inbreeding coefficients can also be
reported on request using inbreeding_coef.
* gender_checker - checks gender assignment against X chromosome
genotypes. Gender values can also be imputed based on genotype
information using gender_impute.
* wrights_fst - calculate Wright's Fst statistic given a set of
subpopulations for each autosomal diploid variant. Used in
conjunction with the --within flag.
'''
stats_map = {"allele_frequency": " --freq %s ",
"missing_data": " --missing %s ",
"hardy_weinberg": " --hardy midp ",
"mendel_errors": " --mendel %s ",
"inbreeding": " --het %s ",
"inbreeding_coef": " --ibc ",
"gender_checker": " --check-sex ",
"gender_impute": " --impute-sex ",
"wrights_fst": " --fst --within %s ",
"case_control_fst": "--fst %s "}
statement = []
        for key, value in kwargs.items():
if value:
try:
assert stats_map[key]
statement.append(stats_map[key] % value)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
else:
try:
assert stats_map[key]
flag = stats_map[key].rstrip("%s ")
statement.append(flag)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
self.statement["stats"] = " ".join(statement)
def run_association(self, association=None, model=None,
run_options=None,
permutation=False, n_perms=None,
random_seed=None, permutation_options=None,
covariates_file=None, covariates=None):
'''
Construct a statement for a plink2 association analysis.
QC filters are constructed from input during instantiation.
        Run options include redirecting logging output, using parallelisation,
        defining the number of threads to use, etc.
The default association uses the --assoc flag. Plink will check
phenotype coding, if it is not case/control it assumes
it is a continuous trait and uses linear regression.
Alternative regression models that include covariates can be used,
i.e. logistic and linear regression.
key
***
{CC} - applies to case/control analysis only
{quant} - applies to quantitative trait only
{CC/quant} - applies to both
run_options
-----------
``--assoc``:
* `fisher | fisher-midp` - uses Fisher's exact test to calculate
association p-values or applies Lancaster's mid-p adjustment. {CC}
* `counts` - causes --assoc to report allele counts instead of
frequencies. {CC}
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `qt-means` - generates a .qassoc.means file reporting trait means
and standard deviations by genotype. {quant}
        * `lin` - causes the Lin et al (2006) statistic to be reported. If
          multiple testing adjustments and/or permutation are also used, they
          will be based on this statistic. {quant}
``--model``:
* `fisher | fisher-midp | trend-only` - uses Fisher's exact test
to calculate association p-values or applies Lancaster's mid-p
adjustment. trend-only forces only a trend test to be performed.
{CC}
* `dom | rec | gen | trend` - use the specified test as the basis
for the model permutation. If none are defined the result with the
smallest p-value is reported. {CC}
* --cell - sets the minimum number of observations per cell in the
2x3 contingency table. The default is 0 with the Fisher and
          Fisher-midp test, otherwise 5. {CC}
``--linear/logistic``:
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
        * `hide-covar` - removes the covariate specific sections from the
          results output. {CC/quant}
        * `sex | no-x-sex` - `sex` adds sex as a covariate to all models,
          whilst `no-x-sex` does not include gender in X-chromosome SNP
          models. {CC/quant}
        * `interaction` - adds genotype X covariate interaction terms
          into the model. Can only be used with permutation if ``--tests``
          is also specified. {CC/quant}
* `beta` - reports the beta coefficients instead of the OR in a
logistic model. {CC}
* `standard-beta` - standardizes the phenotype and all predictor
variables to zero mean and unit variance prior to regression
(separate for each variant analysed). {quant}
* `intercept` - includes the intercept in the output results.
{quant}
model
-----
* `recessive` - `recessive` specifies the model assuming the A1 allele
as recessive. {CC/quant}
* `dominant` - `dominant` specifies the model assuming the A1 allele is
dominant. {CC/quant}
* `genotype` - `genotype` adds an additive effect/dominance deviation
2df joint test with two genotype variables in the test (coded 0/1/2
and 0/1/0). {CC/quant}
* `trend` - forces a trend test to be performed. {CC/quant}
        * `hethom` - `hethom` uses 0/0/1 and 0/1/0 instead of the genotype
          coding. With permutation it will be based on the joint test instead
          of just the additive effects. This can be overridden using the
          `--tests` flag. {CC/quant}
        * `no-snp` - `no-snp` defines a regression of phenotype on covariates
          without reference to genotype data, except where `--condition{-list}`
          is specified. If used with permutation, test results will be reported
          for every covariate. {CC/quant}
permutation
-----------
If permutation is True, run an adaptive Monte Carlo permutation test.
If n_perms is set, this will run a max(T) permutation test with the n
replications. A random seed will need to be provided.
* `perm-count` - this alters the permutation output report to include
counts instead of frequencies
covariates
----------
These should be provided in a separate file. Specifying which
covariates to include can be done as either a comma-separated list
of covariate names or numbers. These numbers will correspond to the
(n+2)th covariate file column as per the plink documentation.
'''
# model map maps common option effects onto specific syntax
model_map = {"--logistic": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--linear": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--model": {"recessive": "rec",
"dominant": "dom",
"genotype": "gen"}}
statement = []
# construct analysis flags
# add model, i.e. additive, recessive, dominant, etc.
# see docstring for details. Make sure correct modifier is used
# with a mapping dictionary
if association == "logistic":
statement.append(" --logistic ")
m_map = model_map["--logistic"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "linear":
statement.append(" --linear ")
m_map = model_map["--linear"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "model":
statement.append(" --model ")
m_map = model_map["--model"]
statement.append(m_map[model])
else:
statement.append(" --assoc ")
# add in run options. These need to be in their correct
# format already
if run_options:
modifiers = " ".join(run_options)
statement.append(modifiers)
else:
pass
        # permutation should have a random seed set by the user. Allow
        # this to set its own seed if one is not provided, but report it in
        # the log file
        if permutation:
            try:
                assert random_seed
            except AssertionError:
                random_seed = random.randint(0, 100000000)
                E.warn("No seed is provided for the permutation test. "
                       "Setting seed to %s. Record this for future "
                       "replicability" % random_seed)
if n_perms:
statement.append(" mperm=%i --seed %s " % (n_perms,
random_seed))
else:
statement.append(" perm --seed %s " % (random_seed))
else:
pass
# if using linear or logistic, covariates can be added into the model
# to adjust for their effects - assumes fixed effects of covariates
# mixed models are not yet implemented in Plink2.
        if covariates:
            covars = covariates.split(",")
            # after splitting, the values are always strings; digit-only
            # values are treated as covariate column numbers, otherwise
            # they are treated as covariate names
            if all([cv.isdigit() for cv in covars]):
                m_covar = " --covar-number %s " % covariates
            elif not any([cv.isdigit() for cv in covars]):
                m_covar = " --covar-name %s " % covariates
            else:
                # mixed names and numbers cannot be passed to Plink -
                # don't adjust the model for any covariates and log a warning
                E.warn("Covariate headers or numbers are not recognised. "
                       "No covariates will be included in the model. Please "
                       "specify them exactly")
                covariates = None
                covariates_file = None
if covariates and covariates_file:
statement.append(" --covar %s %s " % (covariates_file,
m_covar))
        elif covariates and not covariates_file:
E.warn("No covariate file specified. None included in model.")
elif covariates_file and not covariates:
E.warn("No covariates specified to include in the model."
"None included")
else:
pass
self.statement["assoc"] = " ".join(statement)
def PCA(self, n_pcs="20"):
'''
        Perform PCA analysis on a previously generated GRM and output the
        first n principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def _dimension_reduction(self, **kwargs):
'''
Use PCA to perform dimensionality reduction on
input samples. A PCA can be calculated using
a subset of samples which can then be projected on
to other samples.
'''
# FINISH ME!!!!
def _detect_interactions(self, method=None, modifier=None,
set_file=None, set_mode=None,
report_threshold=None,
sig_threshold=None,
covariates_file=None, covariates=None):
'''
Detect epistatic interactions between SNPs using either an inaccurate
scan (fast-epistasis) or a fully saturated linear model
Methods
-------
fast_epistasis - uses an "imprecise but fast" scan of all 3x3 joint genotype
count tables to test for interactions. Can be modified to use a likelihood
        ratio test `boost` or a joint-effects test `joint-effects`. Default is
`joint-effects`.
epistasis - uses a linear model to test for interactions between additive
effects after main effects. Logistic regression for case/control and
linear regression for quantitative traits.
two_locus - tests a single interaction between two variants using joint genotype
counts and frequencies.
adjusted - allows adjustment for covariates in the interaction test, and also adjusts
        for main effects from both the test and target SNP. Requires an R plugin script.
'''
interact_map = {"fast_epistasis": " --fast-epistasis %s ",
"epistasis": " --epistasis %s ",
"two_locus": " --twolocus %s ",
"adjusted": " --R %s "}
statement = []
if modifier:
statement.append(interact_map[method] % modifier)
else:
modifier = ""
statement.append(interact_map[method] % modifier)
if covariates_file:
statement.append("--covar %s --covar-name %s " % (covariates_file,
covariates))
else:
pass
if set_mode and set_file:
# does not work with two-locus test
if method == "two_locus" and set_mode:
E.warn("Two locus test cannot be used in conjunction "
"with a set-based test.")
elif set_mode:
statement.append(" %s --set %s " % (set_mode, set_file))
else:
pass
else:
pass
# alter reporting of significant interactions and significance
# level of interactions
if report_threshold:
statement.append(" --epi1 %0.3f " % float(report_threshold))
else:
pass
if sig_threshold:
statement.append(" --epi2 %0.3f " % float(sig_threshold))
else:
pass
self.statement["epistasis"] = " ".join(statement)
def _matrices(self, matrix_type, shape="triangle", compression=None, options=None):
'''
Calculate a number of different distance matrices:
realised genetic relationship matrix
relationship covariance matrix
identity by descent/state matrix
hamming distance matrix
* matrix_type - matrix to compute. Can be either IBS, 1 - IBS,
Hamming, GRM
'''
statement = []
if matrix_type == "hamming":
flag = " --distance "
elif matrix_type == "ibs":
flag = " --distance ibs "
elif matrix_type == "genomic":
flag = " --distance 1-ibs "
elif matrix_type == "grm":
flag = " --make-grm-bin "
if options:
statement.append(" ".join([flag, shape, compression, options]))
elif matrix_type == "grm":
statement.append(flag)
else:
statement.append(" ".join([flag, shape, compression]))
return " ".join(statement)
def _qc_methods(self, parameter=None, **kwargs):
        '''
Perform QC on genotyping data, SNP-wise and sample-wise.
All arguments are passed as key word arguments, except
cases detailed in `Parameters` where they are passed with
the ``parameter`` argument.
Methods
-------
* ld_prune - generate a list of SNPs in linkage equilibrium by
pruning SNPs on either an LD statistic threshold, i.e. r^2,
or use a variance inflation factor (VIF) threshold
* heterozygosity - calculate average heterozygosity from each
individual across a set of SNPs, threshold on individuals
with deviation from expected proportions
* ibd - calculate the genetic relationship of individuals to
infer relatedness between individuals, threshold on given
degree of relatedness, e.g. IBD > 0.03125, 3rd cousins
* genetic_gender - estimate the gender of an individual
from the X chromosome genotypes - correlate with reported
gender and output discrepancies
* ethnicity_pca - perform PCA using a subset of independent
SNPs to infer genetic ancestry. Compare and contrast this
to individuals reported ancestry. Report discrepancies
and individuals greater than a threshold distance away
from a reference population.
* homozygosity - identifies sets of runs of homozygosity
within individuals. These may be indicative of inbreeding,
systematic genotyping errors or regions under selection.
Parameters
----------
Method parameters can also be passed through this function
as keyword=value pairs.
* ld_prune:
`kb` - this modifier changes the window resolution to kb
rather than bp.
`r2` - the r^2 threshold above which SNPs are to be removed
`vif` - the VIF threshold over which SNPs will be removed
`window` - window size to calculate pair-wise LD over
`step` - step size to advance window by
'''
qc_dict = {"ld_prune": {"R2": " --indep-pairwise %s %s %s ",
"VIF": " --indep %s %s %s "},
"heterozygosity": {"gz": " --het gz",
"raw": " --het "},
"ibd": {"relatives": " --genome gz rel-check ",
"full": " --genome gz full ",
"norm": " --genome gz "},
"genetic_gender": "none",
"ethnicity_pca": "none",
"homozygosity": {"min_snp": " --homozyg-snp %s ",
"min_kb": " --homozyg-kb %s ",
"default": " --homozyg ",
"density": " --homozyg-density ",
"set_gap": " --homozyg-gap ",
"snp_window": " --homozyg-window-snp %s ",
"het_max": " --homozyg-het %s "}}
task_dict = {}
state = []
# put everything in an accessible dictionary first
for task, value in kwargs.items():
task_dict[task] = value
# LD pruning can be passed multiple parameters,
# handle this separately
try:
sub_task = task_dict["ld_prune"]
ld_prune_task = qc_dict["ld_prune"]
try:
step = task_dict["step"]
except KeyError:
raise AttributeError("No step size found, please "
"pass a step size to advance the "
"window by")
try:
window = task_dict["window"]
try:
task_dict["kb"]
window = "".join([window, "kb"])
task_dict.pop("kb", None)
except KeyError:
pass
except KeyError:
raise AttributeError("No window size found. Please input "
"a window size to prune over")
try:
threshold = task_dict["threshold"]
except KeyError:
raise AttributeError("No threshold value, please input "
"a value to LD prune SNPs on")
# add in the kb if it is passed as an argument
state.append(ld_prune_task[sub_task] % (window, step, threshold))
task_dict.pop("threshold", None)
task_dict.pop("ld_prune", None)
task_dict.pop("window", None)
task_dict.pop("step", None)
except KeyError:
pass
for task, value in task_dict.items():
try:
sub_task = qc_dict[task]
try:
state.append(sub_task[value] % parameter)
except TypeError:
state.append(sub_task[value])
except KeyError:
raise AttributeError("Task not found, please see "
"documentation for available features")
self.statement["QC"] = " ".join(state)
def build_statement(self, infiles, outfile, threads=None,
memory="60G", parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["QC"])
except KeyError:
pass
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["stats"])
except KeyError:
pass
try:
statement.append(self.statement["assoc"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["epistasis"])
except KeyError:
pass
if threads:
statement.append(" --threads %i " % threads)
else:
pass
if not memory:
pass
elif memory != "60G":
memory = int(memory.strip("G")) * 1000
statement.append(" --memory %i " % memory)
else:
statement.append(" --memory 60000 ")
# add output flag
# outfile needs to be complete path for Plink to save
# results properly - check if it starts with '/',
# if so is already a full path
if not parallel:
if os.path.isabs(outfile):
statement.append(" --out %s " % outfile)
else:
outpath = "/".join([os.getcwd(), outfile])
statement.append(" --out %s " % outpath)
os.system(" ".join(statement))
else:
# parallelisation only really applies to GRM calculation
# at the moment <- need to generalise
# if parallelisation is used, invoke temp files
# then agglomerate files
statements = []
if os.path.isabs(outfile):
outpath = outfile
else:
outpath = "/".join([os.getcwd(), outfile])
for i in range(1, parallel+1):
# copy list, assigning just makes a pointer
p_state = statement[:]
p_state.append(" --parallel %i %i " % (i, parallel))
p_state.append(" --out %s.%i " % (outpath, i))
statements.append(" ".join(p_state))
os.system(";".join(statements))
class PlinkDev(Plink2):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
    Requires Plink v1.9_devel to be in the user's PATH variable as ``plinkdev`` to
distinguish it from Plink v1.07 and v1.9.
Currently uses Nov 11 development build.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plinkdev"
self.statement = {}
self.filters = []
class GWASResults(object):
'''
A class for handling the results from a GWA, used for plotting
and post-analysis QC
'''
def __init__(self, assoc_file, **kwargs):
# if the assoc_file is a list of multiple files,
# then merge them into a single files
if type(assoc_file) == list and len(assoc_file) > 1:
E.info("multiple results files detected")
self.infiles = assoc_file
self.infile = None
self.results = self.parse_genome_wide(assoc_file)
else:
E.info("single results file detected")
self.infile = assoc_file
self.infiles = None
# results is a pandas dataframe to operate on
self.results = self.get_results(assoc_file, **kwargs)
def parse_genome_wide(self, association_files):
'''
Accept a list of results files, merge them together
and output as a single dataframe
Will this take a lot of memory??
'''
file0 = association_files.pop(0)
df = self.get_results(file0)
for afile in association_files:
_df = self.get_results(afile)
df = df.append(_df)
df["CHR"] = df["CHR"].astype(np.int64)
df.sort_values(by=["CHR", "BP"], inplace=True)
return df
def get_results(self, association_file,
epistasis=False,
file_format="plink"):
'''
Parse a GWA or epistasis results file and return the table
'''
# use Pandas for now - try something different later
# SQLite DB maybe?
# inconsistent number of white spaces between
# fields means Pandas parsing breaks down
# fields need to be the correct data type,
# i.e. BP = int, P = float, SNP = str, etc
# if the file has already been parsed and processed
# just assign it instead
# epistasis results don't have a header
try:
peek = pd.read_table(association_file, nrows=5,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
peek = pd.read_table(association_file, nrows=5,
sep="\t", header=0,
index_col=None)
if epistasis:
try:
results_frame = pd.read_table(association_file,
sep="\s*", header=0,
index_col=None)
except StopIteration:
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None)
# results from fast epistasis are different to others
if file_format == "cassi_covar":
                if results_frame.shape[1] == 12:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P"]
elif results_frame.shape[1] == 14:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP1",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ"]
elif results_frame.shape[1] == 16:
results_frame.columns = ["SNP1", "CHR1", "ID1", "BP",
"SNP2", "CHR2", "ID2", "BP2",
"OR", "SE", "STAT", "P",
"CASE_RSQ", "CTRL_RSQ",
"CASE_DPRIME" "CTRL_DPRIME"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
elif file_format == "cassi":
pass
elif file_format == "plink":
if results_frame.shape[1] == 7:
results_frame.columns = ["CHR1", "SNP1", "CHR",
"SNP", "OR", "STAT", "P"]
elif results_frame.shape[1] == 9:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "NMISS",
"OR", "SE", "STAT", "P"]
else:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "OR",
"SE", "STAT", "P"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
results_frame.loc[:, "P"] = pd.to_numeric(results_frame["P"],
errors="coerce")
return results_frame
else:
try:
assert peek["log10P"].any()
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None,
dtype={"BP": np.int64,
"NMISS": np.int64})
return results_frame
except KeyError:
pass
l_count = 0
E.info("parsing file: %s" % association_file)
with open(association_file, "r") as ifile:
for line in ifile:
# check if spacing is whitespace or tab
if len(line.split(" ")) > 1:
parsed = line.split(" ")
elif len(line.split("\t")) > 1:
parsed = line.split("\t")
else:
raise IOError("file separator not recognised. "
"Must be whitespace or tab")
# remove multiple blank spaces
for i in range(parsed.count('')):
parsed.remove('')
# get rid of the newline
try:
parsed.remove('\n')
except ValueError:
parsed = [(px).rstrip("\n") for px in parsed]
if l_count == 0:
header = [iy.upper() for ix, iy in enumerate(parsed)]
head_idx = [ix for ix, iy in enumerate(parsed)]
map_dict = dict(zip(head_idx, header))
res_dict = dict(zip(header, [[] for each in header]))
l_count += 1
else:
col_idx = [lx for lx, ly in enumerate(parsed)]
col = [ly for lx, ly in enumerate(parsed)]
for i in col_idx:
res_dict[map_dict[i]].append(col[i])
l_count += 1
        # subtract one from the index for the header column
df_idx = range(l_count-1)
results_frame = pd.DataFrame(res_dict, index=df_idx)
results_frame.fillna(value=1.0, inplace=True)
try:
results_frame = results_frame[results_frame["TEST"] == "ADD"]
except KeyError:
pass
# need to handle NA as strings
results_frame["P"][results_frame["P"] == "NA"] = 1.0
results_frame["BP"] = [int(bx) for bx in results_frame["BP"]]
results_frame["P"] = [np.float64(fx) for fx in results_frame["P"]]
try:
results_frame["STAT"][results_frame["STAT"] == "NA"] = 1.0
results_frame["STAT"] = [np.float64(sx) for sx in results_frame["STAT"]]
except KeyError:
try:
results_frame["CHISQ"][results_frame["CHISQ"] == "NA"] = 1.0
results_frame["CHISQ"] = [np.float64(sx) for sx in results_frame["CHISQ"]]
except KeyError:
try:
results_frame["T"][results_frame["T"] == "NA"] = 1.0
results_frame["T"] = [np.float64(sx) for sx in results_frame["T"]]
except KeyError:
pass
try:
results_frame["F_U"][results_frame["F_U"] == "NA"] = 0.0
results_frame["F_U"] = [np.float64(ux) for ux in results_frame["F_U"]]
except KeyError:
pass
try:
results_frame["F_A"][results_frame["F_A"] == "NA"] = 0.0
results_frame["F_A"] = [np.float64(ax) for ax in results_frame["F_A"]]
except KeyError:
pass
try:
results_frame["FREQ"][results_frame["FREQ"] == "NA"] = 0.0
results_frame["FREQ"] = [np.float64(fx) for fx in results_frame["FREQ"]]
except KeyError:
pass
try:
results_frame["OR"][results_frame["OR"] == "NA"] = 1.0
results_frame["OR"] = [np.float64(ox) for ox in results_frame["OR"]]
except KeyError:
try:
results_frame["BETA"][results_frame["BETA"] == "NA"] = 1.0
results_frame["BETA"] = [np.float64(ox) for ox in results_frame["BETA"]]
except KeyError:
results_frame["B"][results_frame["B"] == "NA"] = 0.0
results_frame["B"] = [np.float64(ox) for ox in results_frame["B"]]
return results_frame
def plotManhattan(self, save_path, resolution="chromosome",
write_merged=True, sig_level=8):
'''
Generate a basic manhattan plot of the association results
Just deal with chromosome-by-chromosome for now.
'''
# use the python ggplot plotting package
# need to calculate -log10P values separately
self.results["log10P"] = np.log10(self.results["P"])
# or using rpy2
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
R('''sink(file="sink.text")''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
if resolution == "chromosome":
R('''assoc.df$CHR <- factor(assoc.df$CHR, '''
'''levels=levels(ordered(unique(assoc.df$CHR))),'''
'''labels=unique(paste0("chr", assoc.df$CHR)))''')
R('''nchrom <- length(unique(assoc.df$CHR))''')
R('''myCols <- rep(c("#ca0020", "#404040"), nchrom)[1:nchrom]''')
R('''names(myCols) <- sort(unique(assoc.df$CHR))''')
R('''colScale <- scale_colour_manual(name = "CHR", values=myCols)''')
            R('''bp_indx <- seq_len(dim(assoc.df)[1])''')
R('''assoc.df$BPI <- bp_indx''')
R('''p <- ggplot(assoc.df, aes(x=BPI, y=-log10(P), colour=CHR)) + '''
'''geom_point(size=1) + colScale + '''
'''geom_hline(yintercept=6, linetype="dashed", colour="blue") + '''
'''theme_bw() + labs(x="Chromosome position (bp)", '''
'''y="-log10 P-value") + facet_grid(~CHR, scale="free_x") + '''
'''theme(axis.text.x = element_text(size=8))''')
R('''png("%s", res=90, unit="in", height=8, width=12)''' % save_path)
R('''print(p)''')
R('''dev.off()''')
elif resolution == "genome_wide":
R('''nchroms <- length(unique(assoc.df$CHR))''')
R('''png("%s", width=720, height=540)''' % save_path)
R('''p <- manhattan(assoc.df, main="Manhattan plot",'''
'''ylim=c(0, 50), cex=0.9, suggestiveline=T,'''
'''genomewideline=-log10(5e-8), chrlabs=c(1:nchroms), '''
'''col=c("#8B1A1A","#8470FF"))''')
R('''print(p)''')
R('''dev.off()''')
R('''sink(file=NULL)''')
if write_merged:
return self.results
else:
return False
def plotQQ(self, save_path, resolution="chromosome"):
'''
Generate a QQ-plot of expected vs. observed
test statistics
'''
self.results["log10P"] = np.log(self.results["P"])
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
R('''png("%s", width=720, height=540)''' % save_path)
R('''qq(assoc.df$P)''')
R('''dev.off()''')
def plotEpistasis(self, save_path, resolution="chromosome"):
'''
Generate both manhattan plot of the SNPs tested for
epistasis with their target SNP, and a QQplot
of the association test p-values
'''
# plot QQplot
qq_save = "_".join([save_path, "qqplot.png"])
self.plotQQ(qq_save)
manhattan_save = "_".join([save_path, "manhattan.png"])
self.plotManhattan(manhattan_save,
resolution=resolution,
sig_level=6,
write_merged=False)
def getHits(self, threshold=0.00000005):
'''
Pull out regions of association by selecting
all SNPs with association p-values less than
        a certain threshold. Default is genome-wide
        significance, p < 5x10-8.
        Then select the region +/- 1.5Mb of the index SNP.
'''
hits_df = self.results[self.results["P"] <= threshold]
        # find the range of SNPs within 3Mb of each index SNP
contig_group = hits_df.groupby(["CHR"])
# there may be multiple independent hits on a given
# chromosome. Need to identify independent regions.
# Independent regions are defined by their statistical
# independence, not distance. Just take all SNPs
        # within 3Mb of the lead SNP for each signal;
        # this will create overlaps of association signals
for contig, region in contig_group:
region.index = region["BP"]
chr_df = self.results[self.results["CHR"] == contig]
chr_df.index = chr_df["BP"]
# find independent regions and output consecutively
# if only a single SNP above threshold then there is
# only one independent region!!
if len(region) > 1:
independents = self.findIndependentRegions(region)
indi_group = independents.groupby("Group")
else:
region["Group"] = 1
indi_group = region.groupby("Group")
for group, locus in indi_group:
# if there is only a single variant should
# the region be kept? Likely a false
# positive
if min(locus["BP"]) == max(locus["BP"]):
pass
else:
try:
try:
locus.loc[:, "STAT"] = abs(locus["STAT"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.loc[:, "T"] = abs(locus["T"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.sort_values(by="CHISQ", inplace=True)
index_bp = locus.iloc[0]["BP"]
E.info("Lead SNP for regions is: {}".format(locus.iloc[0]["SNP"]))
left_end = min(chr_df.loc[chr_df.index >= index_bp - 1500000, "BP"])
right_end = max(chr_df.loc[chr_df.index <= index_bp + 1500000, "BP"])
range_df = chr_df.loc[left_end: right_end, :]
max_stat = max(abs(range_df["STAT"]))
yield contig, range_df
def extractSNPs(self, snp_ids):
'''
Extract a specific set of SNP results
Arguments
---------
snp_ids: list
a list of SNP IDs to extract from the
GWAS results
Returns
-------
        snp_results: pandas.core.frame.DataFrame
'''
self.results.index = self.results["SNP"]
snp_results = self.results.loc[snp_ids]
return snp_results
def findIndependentRegions(self, dataframe):
'''
        Assign SNPs to independent regions on
        a chromosome. Uses R distance and tree
        cutting functions
'''
        # move the dataframe into the R environment
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(dataframe)
R.assign("rdf", r_df)
R('''mat <- as.matrix(rdf$BP)''')
        # get distances then cluster, chop tree at 1x10^6 bp
R('''dist.mat <- dist(mat, method="euclidean")''')
R('''clusts <- hclust(dist.mat, "average")''')
R('''cut <- cutree(clusts, h=1e6)''')
R('''out.df <- rdf''')
R('''out.df$Group <- cut''')
# need to handle changes in pandas2ri API
try:
regions_df = pd.DataFrame(py2ri.ri2py(R["out.df"]))
except NotImplementedError:
regions_df = pd.DataFrame(R["out.df"])
return regions_df
def mergeFrequencyResults(self, freq_dir, file_regex):
'''
Merge GWAS results with frequency information,
and format for GCTA joint analysis input
'''
# create a dummy regex to compare
# file_regex type against
test_re = re.compile("A")
if type(file_regex) == str:
file_regex = re.compile(file_regex)
elif type(file_regex) == type(test_re):
pass
else:
raise TypeError("Regex type not recognised. Must"
"be string or re.SRE_Pattern")
all_files = os.listdir(freq_dir)
freq_files = [fx for fx in all_files if re.search(file_regex, fx)]
gwas_df = self.results
df_container = []
for freq in freq_files:
freq_file = os.path.join(freq_dir, freq)
E.info("Adding information from {}".format(freq_file))
# files may or may not be tab-delimited
try:
_df = pd.read_table(freq_file,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
_df = pd.read_table(freq_file,
sep="\t", header=0,
index_col=None)
merge_df = pd.merge(self.results, _df,
left_on=["CHR", "SNP"],
right_on=["CHR", "SNP"],
how='left')
df_container.append(merge_df)
count = 0
for df in df_container:
if not count:
gwas_df = df
count += 1
else:
gwas_df = gwas_df.append(df)
E.info("Calculating Z scores and SEs")
z_scores = -0.862 + np.sqrt(0.743 - 0.2404 *
np.log(gwas_df.loc[:, "P"]))
se = np.log(gwas_df.loc[:, "OR"])/z_scores
gwas_df.loc[:, "Z"] = z_scores
gwas_df.loc[:, "SE"] = se
gwas_df.loc[:, "logOR"] = np.log(gwas_df.loc[:, "OR"])
out_cols = ["SNP", "A1_x", "A2", "MAF", "logOR", "SE", "P", "NMISS"]
out_df = gwas_df[out_cols]
# need to remove duplicates, especially those
# that contain NaN for A2 and MAF
out_df = out_df.loc[~np.isnan(out_df["MAF"])]
return out_df
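# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original pipeline):
# parse a single association results file, draw a genome-wide Manhattan plot
# and collect candidate regions. File names are illustrative and the plotting
# calls assume the rpy2/R (ggplot2, qqman) setup used elsewhere in this module.
def sketch_plot_gwas_results(assoc_file, plot_path):
    '''Sketch: load GWAS results, plot them and return putative hit regions.'''
    gwas = GWASResults(assoc_file)
    gwas.plotManhattan(plot_path, resolution="genome_wide")
    # getHits is a generator of (chromosome, region dataframe) tuples
    return [(contig, region) for contig, region in gwas.getHits(5e-8)]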
##########################################################
# unbound methods that work on files and data structures #
##########################################################
def plotMapPhenotype(data, coords, coord_id_col, lat_col,
long_col, save_path, xvar, var_type,
xlabels=None, level=None):
'''
Generate a map of the UK, with phenotype data overlaid
'''
# merge co-ordinate data with phenotype data
merged_df = pd.merge(left=coords, right=data, left_on=coord_id_col,
right_on=coord_id_col, how='inner')
# pheno column and set level of categorical variable
if xlabels and var_type == "categorical":
# convert to string type as a categorical variable
# drop NA observations from the merged data frame
na_mask = pd.isnull(merged_df.loc[:, xvar])
merged_df = merged_df[~na_mask]
rvar = merged_df.loc[:, xvar].copy()
nvar = pd.Series(np.nan_to_num(rvar), dtype=str)
var = [v for v in set(nvar)]
var.sort()
# recode the variables according to the input labels
xlabs = xlabels.split(",")
lbls = [str(xlabs[ix]) for ix in range(len(var))]
for xv in range(len(var)):
nvar[nvar == var[xv]] = lbls[xv]
merged_df.loc[:, "cat_var"] = nvar
else:
pass
if level:
lvar = merged_df.loc[:, "cat_var"].copy()
mask = lvar.isin([level])
lvar[mask] = 1
lvar[~mask] = 0
lvar = lvar.fillna(0)
merged_df.loc[:, "dichot_var"] = lvar
else:
pass
# push the df into the R env
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(merged_df)
R.assign("pheno.df", r_df)
# setup the map and plot the points
R('''suppressPackageStartupMessages(library(maps))''')
R('''suppressPackageStartupMessages(library(mapdata))''')
R('''uk_map <- map("worldHires", c("UK", "Isle of Wight",'''
'''"Ireland", "Isle of Man", "Wales:Anglesey"), '''
'''xlim=c(-11, 3), ylim=c(50, 60.9), plot=F)''')
# colour by reference, or a colour for each discrete value
if level:
R('''red <- rep("#FF0000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 1]))''')
R('''black <- rep("#000000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 0]))''')
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points((-pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 1], '''
'''(-pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 1], pch=".", col=red)''' % locals())
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 0], '''
'''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 0], pch=".", col=black)''' % locals())
R('''legend('topleft', legend=c("not-%(level)s", "%(level)s"),'''
'''fill=c("#000000", "#FF0000"))''' % locals())
R('''dev.off()''')
else:
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points(pheno.df[,"%(long_col)s"], pheno.df[,"%(lat_col)s"], pch=".", '''
'''col=factor(pheno.df$cat_var))''' % locals())
R('''legend('topleft', legend=unique(pheno.df$cat_var),'''
'''fill=unique(pheno.df$cat_var))''' % locals())
R('''dev.off()''')
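# Hypothetical usage sketch (not part of the original module); the column
# names, labels and output path below are invented for illustration only.
def _plot_map_example(pheno_df, coords_df):
    plotMapPhenotype(data=pheno_df, coords=coords_df, coord_id_col="f.eid",
                     lat_col="lat", long_col="long", save_path="uk_map.png",
                     xvar="smoking_status", var_type="categorical",
                     xlabels="never,ever", level="ever")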
def plotPhenotype(data, plot_type, x, y=None, group=None,
save_path=None, labels=None, xlabels=None,
ylabels=None, glabels=None, var_type="continuous"):
'''
Generate plots of phenotypes using ggplot
'''
# change data format if necessary and convert nan/NA to missing
if not y and var_type == "categorical":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "integer":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "continuous":
var = data.loc[:, x].copy()
data.loc[:, x] = pd.Series(var, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "categorical":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=str)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF that
# transforms the input, so we have to infer
# the output shape
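# Illustrative sketch of that inference (not part of the original suite):
#   df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   df.apply(lambda x: [x['a'], x['b']], axis=1)            # -> Series of lists
#   df.apply(lambda x: [x['a'], x['b']], axis=1,
#            result_type='expand')                          # -> 2-column DataFrame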
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
Take a list of frames and zip them together, under the
assumption that they all share the first frame's index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
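# Illustrative usage sketch (the helper below is new, not part of the original
# test module); it shows the per-column interleaving that zip_frames produces.
def _zip_frames_example():
    df = pd.DataFrame({'A': [1.0, 4.0], 'B': [9.0, 16.0]})
    # resulting column order: A (abs), A (sqrt), B (abs), B (sqrt)
    return zip_frames([df.abs(), np.sqrt(df)], axis=1)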
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# https://github.com/pandas-dev/pandas/issues/20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
with tm.assert_produces_warning(None):
result = mdf.agg(['min', 'sum'])
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(None):
result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min'])
# For backwards compatibility, the result's index is
# still sorted by function name, so it's ['min', 'sum']
# not ['sum', 'min'].
expected = expected[['D', 'C', 'B', 'A']]
tm.assert_frame_equal(result, expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
# all reducers
expected = pd.concat([self.frame.mean(axis=axis),
self.frame.max(axis=axis),
self.frame.sum(axis=axis),
], axis=1)
expected.columns = ['mean', 'max', 'sum']
expected = expected.T if axis in {0, 'index'} else expected
result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
result = self.frame.agg(func, axis=axis)
expected = Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name2].sum()],
index=[name1, name2])
assert_series_equal(result, expected)
# dict input with lists
func = OrderedDict([(name1, ['mean']), (name2, ['sum'])])
result = self.frame.agg(func, axis=axis)
expected = DataFrame({
name1: Series([self.frame.loc(other_axis)[name1].mean()],
index=['mean']),
name2: Series([self.frame.loc(other_axis)[name2].sum()],
index=['sum'])})
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
# dict input with lists with multiple
func = OrderedDict([(name1, ['mean', 'sum']), (name2, ['sum', 'max'])])
result = self.frame.agg(func, axis=axis)
expected = DataFrame(OrderedDict([
(name1, Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name1].sum()],
index=['mean', 'sum'])),
(name2, Series([self.frame.loc(other_axis)[name2].sum(),
self.frame.loc(other_axis)[name2].max()],
index=['sum', 'max'])),
]))
expected = expected.T if axis in {1, 'columns'} else expected
assert_frame_equal(result, expected)
def test_nuiscance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]],
index=['min'], columns=df.columns)
assert_frame_equal(result, expected)
result = df.agg('sum')
expected = Series([6, 6., 'foobarbaz'],
index=['A', 'B', 'C'])
assert_series_equal(result, expected)
result = df.agg(['sum'])
expected = DataFrame([[6, 6., 'foobarbaz']],
index=['sum'], columns=['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_non_callable_aggregates(self):
# GH 16405
# 'size' is a property of frame/series
# validate that this is working
df = DataFrame({'A': [None, 2, 3],
'B': [1.0, np.nan, 3.0],
'C': ['foo', None, 'bar']})
# Function aggregate
result = df.agg({'A': 'count'})
expected = Series({'A': 2})
assert_series_equal(result, expected)
# Non-function aggregate
result = df.agg({'A': 'size'})
expected = Series({'A': 3})
assert_series_equal(result, expected)
# Mix function and non-function aggs
result1 = df.agg(['count', 'size'])
result2 = df.agg({'A': ['count', 'size'],
'B': ['count', 'size'],
'C': ['count', 'size']})
expected = pd.DataFrame({'A': {'count': 2, 'size': 3},
'B': {'count': 2, 'size': 3},
'C': {'count': 2, 'size': 3}})
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result2, expected, check_like=True)
# Just functional string arg is same as calling df.arg()
result = df.agg('count')
expected = df.count()
assert_series_equal(result, expected)
# Just a string attribute arg same as calling df.arg
result = df.agg('size')
expected = df.size
assert result == expected
@pytest.mark.parametrize("df, func, expected", chain(
_get_cython_table_params(
DataFrame(), [
('sum', Series()),
('max', Series()),
('min', Series()),
('all', Series(dtype=bool)),
('any', Series(dtype=bool)),
('mean', Series()),
('prod', Series()),
('std', Series()),
('var', Series()),
('median', Series()),
]),
_get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]), [
('sum', Series([1., 3])),
('max', Series([1., 2])),
('min', Series([1., 1])),
('all', Series([True, True])),
('any', Series([True, True])),
('mean', Series([1, 1.5])),
('prod', Series([1., 2])),
('std', Series([np.nan, 0.707107])),
('var', Series([np.nan, 0.5])),
('median', Series([1, 1.5])),
]),
))
def test_agg_cython_table(self, df, func, expected, axis):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = df.agg(func, axis=axis)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("df, func, expected", chain(
_get_cython_table_params(
DataFrame(), [
('cumprod', DataFrame()),
('cumsum', DataFrame()),
]),
_get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]), [
('cumprod', DataFrame([[np.nan, 1], [1., 2.]])),
('cumsum', DataFrame([[np.nan, 1], [1., 3.]])),
]),
))
def test_agg_cython_table_transform(self, df, func, expected, axis):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = df.agg(func, axis=axis)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("df, func, expected", _get_cython_table_params(
DataFrame([['a', 'b'], ['b', 'a']])
import numbers
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike, DtypeObj
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import register_extension_dtype
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow
class _IntegerDtype(BaseMaskedDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
name: str
base = None
type: Type
def __repr__(self) -> str:
sign = "U" if self.is_unsigned_integer else ""
return f"{sign}Int{8 * self.itemsize}Dtype()"
@cache_readonly
def is_signed_integer(self) -> bool:
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self) -> bool:
return self.kind == "u"
@property
def _is_numeric(self) -> bool:
return True
@cache_readonly
def numpy_dtype(self) -> np.dtype:
""" Return an instance of our numpy dtype """
return np.dtype(self.type)
@cache_readonly
def kind(self) -> str:
return self.numpy_dtype.kind
@cache_readonly
def itemsize(self) -> int:
""" Return the number of bytes in this dtype """
return self.numpy_dtype.itemsize
@classmethod
def construct_array_type(cls) -> Type["IntegerArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return IntegerArray
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
# we only handle nullable EA dtypes and numeric numpy dtypes
if not all(
isinstance(t, BaseMaskedDtype)
or (
isinstance(t, np.dtype)
and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))
)
for t in dtypes
):
return None
np_dtype = np.find_common_type(
[t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], []
)
if np.issubdtype(np_dtype, np.integer):
return STR_TO_DTYPE[str(np_dtype)]
return None
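# Illustrative note (not part of the original source): for example, combining
# Int64Dtype() with a plain np.dtype('int32') resolves to np.int64 via
# np.find_common_type and is then mapped back to the nullable Int64Dtype().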
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "IntegerArray":
"""
Construct IntegerArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
if not array.type.equals(pyarrow_type):
array = array.cast(pyarrow_type)
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=self.type)
int_arr = IntegerArray(data.copy(), ~mask, copy=False)
results.append(int_arr)
return IntegerArray._concat_same_type(results)
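# Illustrative sketch (the helper below is new, for demonstration only): how
# the concrete _IntegerDtype subclasses surface to users.
def _integer_dtype_example():
    import pandas as pd
    s = pd.Series([1, 2, None], dtype="Int64")  # nullable, NA-aware integers
    return s.dtype, pd.Int8Dtype().itemsize     # (Int64Dtype(), 1)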
def integer_array(values, dtype=None, copy: bool = False) -> "IntegerArray":
"""
Infer and return an integer array of the values.
Parameters
----------
values : 1D list-like
dtype : dtype, optional
dtype to coerce
copy : bool, default False
Returns
-------
IntegerArray
Raises
------
TypeError if incompatible types
"""
values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
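# Illustrative usage sketch (the helper below is new, for demonstration only).
def _integer_array_example():
    arr = integer_array([1, 2, None])
    # arr.dtype -> Int64Dtype(); the missing entry displays as <NA>
    return arr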
def safe_cast(values, dtype, copy: bool):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError as err:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
) from err
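# Illustrative sketch (the helper below is new, for demonstration only):
# integral floats are accepted, non-integral floats raise.
def _safe_cast_example():
    ok = safe_cast(np.array([1.0, 2.0]), np.int64, copy=False)  # array([1, 2])
    try:
        safe_cast(np.array([1.5]), np.int64, copy=False)
    except TypeError:
        pass  # non-equivalent float values cannot be safely cast
    return ok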
def coerce_to_array(
values, dtype, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is an integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
| assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 20:33:03 2021
@author: Andrei
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from scipy.stats import skew
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
train_df = | pd.read_csv('train.csv', index_col='Id') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor, CatBoostClassifier
from sklearn import metrics
import time  # used below for time.ctime() progress messages
# get some basic information
def resumetable(df):
print(f"Dataset Shape: {df.shape}")
summary = pd.DataFrame(df.dtypes,columns=['dtypes'])
summary = summary.reset_index()
summary['Name'] = summary['index']
summary = summary[['Name','dtypes']]
summary['Missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
summary['First Value'] = df.loc[0].values
summary['Second Value'] = df.loc[1].values
summary['Third Value'] = df.loc[2].values
# for name in summary['Name'].value_counts().index:
# summary.loc[summary['Name'] == name, 'Entropy'] = round(stats.entropy(df[name].value_counts(normalize=True), base=2),2)
return summary
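# --- Hedged usage sketch (illustrative frame, not from the original notebook) ---
# resumetable() reads df.loc[0..2], so the frame needs at least three rows:
# demo = pd.DataFrame({'a': [1, 2, 2], 'b': ['x', None, 'z'], 'c': [0.1, 0.2, 0.3]})
# resumetable(demo)  # prints the shape and returns dtypes / missing counts / uniques / first values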
# reduce memory
def reduce_men_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']  # include float32 so those columns are downcast too
start_mem = df.memory_usage().sum() / 1024 ** 2
# 1024**2 = 1024*1024
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
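# --- Hedged usage sketch (synthetic frame, not from the original notebook) ---
# small = pd.DataFrame({'ints': np.arange(1000, dtype='int64'),
#                       'floats': np.linspace(0.0, 1.0, 1000)})
# small = reduce_men_usage(small)  # downcasts e.g. int64 -> int16 and float64 -> float16 here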
# remove some outliers
def CalcOutliers(df_num):
# calculating mean and standard deviation (std) of the array
data_mean, data_std = np.mean(df_num), np.std(df_num)
# setting the cut line for both higher and lower values
# You can change this value
cut = data_std * 3
#Calculating the higher and lower cut values
lower, upper = data_mean - cut, data_mean + cut
# creating an array of lower, higher and total outlier values
outliers_lower = [x for x in df_num if x < lower]
outliers_higher = [x for x in df_num if x > upper]
outliers_total = [x for x in df_num if x < lower or x > upper]
# array without outlier values
outliers_removed = [x for x in df_num if x > lower and x < upper]
print('Identified lowest outliers: %d' % len(outliers_lower)) # printing total number of values in lower cut of outliers
print('Identified upper outliers: %d' % len(outliers_higher)) # printing total number of values in higher cut of outliers
print('Total outlier observations: %d' % len(outliers_total)) # printing total number of values outliers of both sides
print('Non-outlier observations: %d' % len(outliers_removed)) # printing total number of non outlier values
print("Total percentual of Outliers: ", round((len(outliers_total) / len(outliers_removed) )*100, 4)) # Percentual of outliers in points
return
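# --- Hedged usage sketch (synthetic data, not from the original notebook) ---
# vals = np.append(np.random.normal(0, 1, 10000), [15, -12])  # two obvious outliers
# CalcOutliers(vals)  # prints lower/upper/total outlier counts and the outlier percentage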
# PCA for Vxx
from sklearn.preprocessing import minmax_scale
from sklearn.decomposition import PCA
# from sklearn.cluster import KMeans
mas_v = train.columns[55:394]
def PCA_change(df, cols, n_components, prefix='PCA_', rand_seed=4):
pca = PCA(n_components=n_components, random_state=rand_seed)
principalComponents = pca.fit_transform(df[cols])
principalDf = pd.DataFrame(principalComponents)
df.drop(cols, axis=1, inplace=True)
principalDf.rename(columns=lambda x: str(prefix) + str(x), inplace=True)
df = pd.concat([df, principalDf], axis=1)
return df
for col in mas_v:
train[col] = train[col].fillna((train[col].min() - 2))
train[col] = (minmax_scale(train[col], feature_range=(0, 1)))
test[col] = test[col].fillna((test[col].min() - 2))
test[col] = (minmax_scale(test[col], feature_range=(0, 1)))
train = PCA_change(train, mas_v, prefix='PCA_V_', n_components=30)
test = PCA_change(test, mas_v, prefix='PCA_V_', n_components=30)
# model selection_classification
def train_model_classification(X, X_test, y, params, folds, model_type='lgb', eval_metric='auc', columns=None, plot_feature_importance=False, model=None,
verbose=10000, early_stopping_rounds=200, n_estimators=50000, splits=None, n_folds=3, averaging='usual', n_jobs=-1,
eval_auc=None):
"""
https://www.kaggle.com/artgor/eda-and-models/notebook
A function to train a variety of classification models.
Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances.
:params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing)
:params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing)
:params: y - target
:params: folds - folds to split data
:params: model_type - type of model to use
:params: eval_metric - metric to use
:params: columns - columns to use. If None - use all columns
:params: plot_feature_importance - whether to plot feature importance of LGB
:params: model - sklearn model, works only for "sklearn" model type
"""
columns = X.columns if columns is None else columns
n_splits = folds.n_splits if splits is None else n_folds
X_test = X_test[columns]
# to set up scoring parameters
metrics_dict = {'auc': {'lgb_metric_name': eval_auc,
'catboost_metric_name': 'AUC',
'sklearn_scoring_function': metrics.roc_auc_score},
}
result_dict = {}
if averaging == 'usual':
# out-of-fold predictions on train data
oof = np.zeros((len(X), 1))
# averaged predictions on train data
prediction = np.zeros((len(X_test), 1))
elif averaging == 'rank':
# out-of-fold predictions on train data
oof = np.zeros((len(X), 1))
# averaged predictions on train data
prediction = np.zeros((len(X_test), 1))
# list of scores on folds
scores = []
feature_importance = pd.DataFrame()
# split and train on folds
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
print(f'Fold {fold_n + 1} started at {time.ctime()}')
if type(X) == np.ndarray:
X_train, X_valid = X[columns][train_index], X[columns][valid_index]
y_train, y_valid = y[train_index], y[valid_index]
else:
X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
if model_type == 'lgb':
model = lgb.LGBMClassifier(**params, n_estimators=n_estimators, n_jobs=n_jobs)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],
verbose=verbose, early_stopping_rounds=early_stopping_rounds)
y_pred_valid = model.predict_proba(X_valid)[:, 1]
y_pred: object = model.predict_proba(X_test, num_iteration=model.best_iteration_)[:, 1]
if model_type == 'xgb':
train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)
valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)
watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]
model = xgb.train(dtrain=train_data, num_boost_round=n_estimators, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbose, params=params)
y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns), ntree_limit=model.best_ntree_limit)
y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)
if model_type == 'sklearn':
model = model
model.fit(X_train, y_train)
y_pred_valid = model.predict(X_valid).reshape(-1,)
score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)
print(f'Fold {fold_n}. {eval_metric}: {score:.4f}.')
print('')
y_pred = model.predict_proba(X_test)
if model_type == 'cat':
model = CatBoostClassifier(iterations=n_estimators, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'], **params,
loss_function='Logloss')
model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test)
if averaging == 'usual':
oof[valid_index] = y_pred_valid.reshape(-1, 1)# must be one column
scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))
prediction += y_pred.reshape(-1, 1)
elif averaging == 'rank':
oof[valid_index] = y_pred_valid.reshape(-1, 1)
scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))
prediction += pd.Series(y_pred).rank().values.reshape(-1, 1)
if model_type == 'lgb' and plot_feature_importance:
# feature importance
fold_importance = pd.DataFrame()
fold_importance["feature"] = columns
fold_importance["importance"] = model.feature_importances_
fold_importance["fold"] = fold_n + 1
feature_importance = | pd.concat([feature_importance, fold_importance], axis=0) | pandas.concat |
"""
Utilises the powerful tools of Selenium to safely navigate and collect data from websites without the use of an API.
"""
from typing import Tuple
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import numpy as np
import pandas as pd
from time import sleep
import uuid
class AirbnbScraper:
def __init__(self, slow_internet_speed : bool=False, config : str='default', messages : bool=False):
"""A Webscraper that crawls through Airbnb's website and gathers structured/unstructured data.
When an instance of Scraper is initialized, a Selenium Webdriver gets the homepage by use
of the `url` attribute. Then it clicks past the cookie wall (if applicable), and navigates onto
the main products hub.
Parameters
----------
slow_internet_speed : bool, default=False
The crawler is designed to allow for lag whilst loading elements on a page, but users with a
particularly slow internet speed may cause the crawler to miss elements. A `slow_internet_speed` flag
allows those users to still enjoy the potential of the scraper. It is not recommended to run the full
scraper `scrape_all()` with `slow_internet_speed` enabled. This will take > 12 hours.
config : str, default = 'default'
Option to configure the selenium webdriver to operate in 'headless' mode or 'default' mode.
messages : bool, default=False
Option to activate messages of each successful item saved by the scraper, and any errors if applied.
Attributes
----------
BATCH_ATTEMPTS : int
It is common that a Scraper can fail to find an element on a webpage for numerous reasons,
for example that the element hasn't been loaded yet. `BATCH_ATTEMPTS` allows for this and
offers up to 25 attempts for the Scraper to locate and pull data from each element it is looking
for, until the Scraper assumes that the element doesn't exist in the particular page. If
`slow_internet_speed` is enabled, the attempts limit is increased to 50.
main_url : str
The URL for Airbnb's home page, provided for the Selenium webdriver to get upon initialization
of the Scraper object.
driver : Selenium Webdriver
The webdriver that is utilized to crawl through Airbnb's website
slow_internet_speed : bool
The crawler is designed to allow for lag whilst loading elements on a page, but users with a
particularly slow internet speed may cause the crawler to miss elements. A `slow_internet_speed` flag
allows those users to still enjoy the potential of the scraper. It is not recommended to run the full
scraper `scrape_all()` with `slow_internet_speed` enabled. This will take > 12 hours.
messages : bool
Option to activate messages of each successful item saved by the scraper, and any errors if applied.
"""
self.main_url = "https://www.airbnb.co.uk/"
self.slow_internet_speed = slow_internet_speed
self.driver = None
self.BATCH_ATTEMPTS = 50 if self.slow_internet_speed else 25
self.messages = messages
self.COOKIE_CLICKED = False
# Initialising the selenium webdriver
options = webdriver.ChromeOptions()
if config == 'default':
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--start-maximized")
self.driver = webdriver.Chrome(options=options)
elif config == 'headless':
options.add_argument('--no-sandbox')
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('--log-level=3')
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument("--window-size=1920, 1200")
options.add_argument('--disable-dev-shm-usage')
self.driver = webdriver.Chrome(options=options)
print('Running headless scraper. Do NOT close the program or interrupt the terminal.')
else:
raise ValueError(f'Configuration option "{config}" not recognised')
def get_categories(self, count : int):
"""Gets category names and corresponding urls for each product header in Airbnb's main products page.
This method first clicks past a cookie wall if applicable. Using the `driver` that has been initialised
with the Scraper object, this method located through and clicks each header button in the top menu bar of
the main products page. When each header is clicked, the category name and the current url of that clicked
header are stored into a zip object.
Parameters
----------
count : int , optional
When specified, the `count` parameter will set a limit to the number of headers that are clicked through
and consequently, the number of categories and corresponding urls that are returned. This parameter is optional,
and defaulted to 25 which is the number of total headers that populate Airbnb's products page.
Returns
-------
zip of < tuples of (str, str) >
A zipped object of tuples of the category name, followed by the url of opening that header.
Raises
------
ValueError
If the count parameter is 0 or negative
"""
# Getting the Airbnb url and clicking past the cookie wall
self.driver.get(self.main_url)
sleep(5 if self.slow_internet_speed else 2)
self._cookie_check_and_click()
# Click the I'm flexible to get to the product browser
flexible_button = self.driver.find_element(By.LINK_TEXT,"I’m flexible")
flexible_button.click()
sleep(5 if self.slow_internet_speed else 2)
# The count variable is an input to stop the header yield at any given index of iteration
# for example: if count was set to 3, then the loop below to collect header links/titles
# would break on the third iteration.
if count > 29: ### WRONG, MAKE MAX DYNAMIC
count = 29
if count < 1:
raise ValueError('Count must be a positive integer greater than 1')
#self._cookie_check_and_click()
# START of the header yield code. This uses Selenium's webdriver
# to both click through and catch the header names and urls of each of the header buttons.
header_container = self.driver.find_element(By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div/div/div/div/div[1]/div/nav/div/div/div/div/div[2]/div/div[1]/div/div[3]')
headers = header_container.find_elements(By.XPATH, "./*")
headers.pop()
# First, get the text for the headers up to the 'more'. (Not all headers are visible immediately)
# if the count is lower than current visible headers, this is sliced at the bottom
categories = []
category_links = []
for header in headers:
categories.append(header.text)
categories.remove('More')
categories = categories[:count]
# Click through the visible headers to get urls for each one (except for 'More')
counted = 0
for i in range(len(headers)):
headers[i].click()
if i!= len(headers) - 1:
category_links.append(self.driver.current_url)
counted +=1
# Break the entire function if count is met
if counted == count:
return zip(categories, category_links)
sleep(3 if self.slow_internet_speed else 1)
# Click the 'More' header and get the elements for the rest of the headers while they're visible
if i == len(headers) - 1:
sleep(1.5 if self.slow_internet_speed else 0.5)
more_menu = headers[i].find_element(By.XPATH, '//*[@id="flexible-destination-more-menu"]')
more_headers = more_menu.find_elements(By.XPATH, "./*")
# The offset means indexing goes 0, 0, 1, 2, 3, 4,... because of the nature of the 'More' column
for j in range(-1,len(more_headers)-1):
if j == -1:
j+=1
# Click the 'More' header and get the elements for the rest of the headers while they're visible
# the difficulty with such a dynamic page is that this has to be repeatedly done
more_menu = headers[i].find_element(By.XPATH, '//*[@id="flexible-destination-more-menu"]')
more_headers = more_menu.find_elements(By.XPATH, "./*")
sleep(1.5 if self.slow_internet_speed else 0.5)
# Get the category name from header
categories.append(more_headers[j].text)
more_headers[j].click()
sleep(1.5 if self.slow_internet_speed else 0.5)
# After clicking that header, get the corresponding header url for it
category_links.append(self.driver.current_url)
headers[i].click()
counted+=1
# Break the entire function if count is met
if counted == count:
return zip(categories, category_links)
def __scroll(self, driver : webdriver, SCROLL_PAUSE_TIME : int):
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = self.driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
return
last_height = new_height
def get_products(self, header_url : str, SCROLLING : bool = True):
""" Returns an array of the product urls for a homepage with a certain header clicked.
Parameters
----------
header_url : str
the url of the header to be opened by the `driver` where the corresponding products can be found.
SCROLLING : bool , default=True
When a header page is opened, the lazy loading of the Airbnb's website prevents all products from
being located. When `SCROLLING` is set to True, this calls a protected method that scrolls through the
entire page so that every product is loaded and therefore the url can be stored. Setting to False is a
clever way of electing to only take a sample of the products from each header page. This parameter is
optional and defaulted to True.
Returns
-------
product_links : np.array of str
A numpy array of strings containing the urls for each product that has been found.
"""
self.driver.get(header_url)
sleep(1.5 if self.slow_internet_speed else 0.5)
self._cookie_check_and_click()
self.driver.execute_script("document.body.style.zoom='75%'")
sleep(5 if self.slow_internet_speed else 2)
# Set to FALSE when testing/sampling
if SCROLLING:
pause_time = 7 if self.slow_internet_speed else 3.5
self.__scroll(self.driver, pause_time)
for i in range(self.BATCH_ATTEMPTS):
try:
# Store all links for locations listed on page in array
places_container = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div/div/div/div/div/div/div/div[2]/div/div/div')
places = places_container.find_elements(By.XPATH, "./*" )
product_links = np.array([])
for place in places:
href = place.find_element(By.TAG_NAME, 'a')
url = f"{href.get_attribute('href')}"
product_links = np.append(product_links,url)
except Exception as e:
pass
return product_links
@staticmethod
def string_clean(text: str, str_type : str) -> str:
""" Takes in raw text from elements on Airbnb product pages and formats them into parsable strings.
Text data from elements in a product page on Airbnb's website come in a variety of forms not so easily
understandable by machines. This static method is necessary to essentially clean the text from certain elements
in each product page.
Parameters
----------
text : str
The raw text data from the element from the product page.
str_type : {'info', 'review count', 'amenities'}
The nature of the text data differs for each element of the product webpage, thus the pythonic
strategy for cleaning the text data must do the same. Specifying which page element the text comes
from will specify which set of programmatic instructions the method needs to take in order to clean
the text data.
Returns
-------
if `str_type` is 'info':
output: list of [tuples of (str, int)]
where the strings are labels of guests, bedrooms beds, and bathrooms, and the corresponding
int is their count.
if `str_type` is 'review count`:
output: int
Number of reviews for product.
if `str_type` is 'amenities':
output: int
Number of amenities for product.
Raises
------
ValueError
If the inputted string for `str_type` doesn't match any of the accepted strings.
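Examples
--------
Illustrative doctests only; the raw strings below are made-up stand-ins for page text.

>>> AirbnbScraper.string_clean('(12 Reviews)', 'review count')
12
>>> AirbnbScraper.string_clean('Show all 31 amenities', 'amenities')
31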
"""
if str_type == 'info':
output = []
# Organises the text into a clean list of
# ['x guests', 'x bedrooms', 'x beds', 'x bathrooms']
# this is much easier to be iterated over and parsed
text = text.replace('·', '')
text = text.split(' ')
clean_info = []
for i in text:
clean_info.append(i)
for val in clean_info:
label = val.split()[1]
# unlikely to happen, but if there's an anomaly in the site text,
# the certain element is ignored and this doesn't mess up the data
if label not in ['guests', 'guest', 'bedrooms', 'bedroom',
'beds', 'bed', 'bathrooms' ,'bathroom', 'private bathroom']:
pass
else:
# An element with a count of '1' (e.g. 1 bedroom) has no 's' on the end, which
# will confuse the dictionary and dataframe. So all singular instances have an 's' added
if label[-1] != 's':
label += 's'
# The output is a list of tuples: [('guests', x), ('bedrooms', x) ...]
output.append((label, float(val.split()[0])))
return output
elif str_type == 'review count':
# Gets rid of brackets if they are there
text = text.replace('(','')
text = text.replace(')','')
# Split up the number and reviews string into [x, 'Reviews']
text = text.split(' ')
output = text[0]
return int(output)
elif str_type == 'amenities':
# Simply filters out the numerical value in the text:
# "Show all xx amenities"
output = int(''.join(filter(str.isdigit, text)))
return output
else:
raise ValueError('Please specify a distinct part of the page to clean. Have you checked your spelling?')
def __scrape_product_images(self, driver : webdriver):
images_container = driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div/div/div[1]/div')
images = images_container.find_elements(By.TAG_NAME, 'img')
if images is None:
raise Exception
sources = []
for image in images:
sources.append(image.get_attribute('src'))
return sources
def scrape_product_data(self, product_url: str, ID : uuid.uuid4, category : str, message : bool=False):
"""Gets a page of an Airbnb product and scrapes structured and unstructured data. Utilises both Selenium and BeautifulSoup.
Parameters
----------
product_url : str
The url of the product page to be scraped
ID : int
The unique ID assigned to the particular product. This will be used to identify the data in a database/data lake.
category : str
The category name corresponding to where a product is found. This can be read on the headers tab on Airbnb's website.
message : bool, default=False
With the `message` flag enabled, the product scrape status will be logged to the terminal, as well as whether any
images were saved.
Returns
-------
product_dict : dict of {str : any}
Structured data stored in the form of a dictionary containing relevant and human readable information about the product.
image_data : list of [str, str, ...]
A tuple of source links for the images found on Airbnb's website. These can be transformed into image files.
"""
self._cookie_check_and_click()
# Initialising default dict and adding the passed ID and
# category parameters
product_dict = dict()
product_dict['ID'] = ID
product_dict['Category'] = category
# Getting the product page with driver
self.driver.get(product_url)
sleep(3 if self.slow_internet_speed else 0.5)
for i in range(self.BATCH_ATTEMPTS):
try:
image_data = self.__scrape_product_images(self.driver)
if image_data:
break
else:
raise Exception
except Exception as e:
continue
# Getting data from page. Looped through multiple attempts
# to allow for errors due to elements not being loaded yet
for j in range(self.BATCH_ATTEMPTS):
try:
# Product title (str)
for i in range(self.BATCH_ATTEMPTS):
try:
title_element = self.driver.find_element(By.TAG_NAME, 'h1')
title = title_element.text
product_dict['Title'] = title_element.text
break
except Exception as e:
continue
# Product Location (str)
for i in range(self.BATCH_ATTEMPTS):
try:
location_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[1]/div/div/div/div/section/div[2]/div[1]/span[5]/button/span')
location = location_elem.text.replace(',', '')
product_dict['Location'] = location
break
except Exception as e:
continue
# Counts for guests, bedrooms, beds and bathrooms (all int)
for i in range(self.BATCH_ATTEMPTS):
try:
info_container = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div[1]/div/div/section/div/div/div/div[1]/ol' )
info = self.string_clean(
info_container.text,
str_type = 'info')
for val in info:
product_dict[val[0]] = val[1]
break
except Exception as e:
continue
# Number of Reviews (int)
for i in range(self.BATCH_ATTEMPTS):
try:
review_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[1]/div/div/div/div/section/div[2]/div[1]/span[1]/span[3]/button')
reviews = self.string_clean(review_elem.text, 'review count')
product_dict['Review_Count'] = reviews
break
except Exception as e:
continue
# Overall star rating (float)
for i in range(self.BATCH_ATTEMPTS):
try:
rating_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[1]/div/div/div/div/section/div[2]/div[1]/span[1]/span[2]')
overall_rating = rating_elem.text.replace('·', '')
product_dict['Overall_Rate'] = float(overall_rating)
break
except Exception as e:
continue
# Price per night (float)
for i in range(self.BATCH_ATTEMPTS):
try:
price_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[3]/div/div[2]/div/div/div[1]/div/div/div/div/div/div/div[1]/div[1]/div[1]/div/div/div/span[1]')
price_pNight = price_elem.text[1:] # Gets rid of £
product_dict['Price_Night'] = float(price_pNight)
break
except Exception as e:
continue
# Sub ratings (list of floats)
for i in range(self.BATCH_ATTEMPTS):
try:
subratings_container = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[4]/div/div/div/div[2]/div[2]/div/div')
subratings_elements = subratings_container.find_elements(By.XPATH, "./*")
for elem in subratings_elements:
subrating = elem.text.split('\n')
product_dict[subrating[0] + '_rate'] = subrating[1]
break
except Exception as e:
continue
# How many amenities each location has (int)
for i in range(self.BATCH_ATTEMPTS):
try:
amenities_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div[5]/div/div[2]/section/div[4]/a')
amenities_count = self.string_clean(amenities_elem.text, 'amenities')
product_dict['amenities_count'] = amenities_count
break
except Exception as e:
continue
# Product URL (str)
product_dict['url'] = product_url
# Catches if html hasn't been parsed properly due to loading lag, and re-runs the loop
if product_dict['Title'] == None \
or product_dict['Location'] == None\
or product_dict['url'] == None:
sleep(1 if self.slow_internet_speed else 0.25)
raise ValueError
else:
break
except Exception as e:
continue
if message:
if image_data:
print(f'Logged product "{title}" as {ID}. Images found: {len(image_data)}')
else:
print(f'Logged product "{title}" as {ID}. FAILED TO SAVE IMAGES.')
return product_dict, image_data
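# --- Hedged usage sketch (illustrative URL and category, not a real listing) ---
# bot = AirbnbScraper(messages=True)
# record, image_urls = bot.scrape_product_data(
#     'https://www.airbnb.co.uk/rooms/12345', uuid.uuid4(), 'Beachfront', message=True)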
def scrape_all(self, sample : bool = False):
"""Crawls through the entire "I'm Feeling Lucky section" of Airbnb and collects structured and unstructured data from each product.
Structured data is stored in the form of a pandas dataframe, and unstructured data (images) are stored in a dictionary of corresponding
product IDs as keys, and tuples of source links for each product as the values.
Parameters
----------
sample : bool, default=False
Scraping the entirety of Airbnb's products hub is a large task. The `sample` logic, when set to true, severely restricts the number of products
that the crawler will try to scrape, in the event that one simply wishes to only scrape a few products, or quickly test that the module is functioning.
Returns
-------
df : pandas.DataFrame
The pandas dataframe containing all of the information for each product scraped in a neat and structured fashion.
image_dict : dict of {int : tuple of (str, str, ...)}
Image data is stored in a dictionary of corresponding product IDs as keys, and tuples of source links for each product as the values.
"""
# Primary key, pandas dataframe and a missing data count initialised
#ID = 1000
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
"""
analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator
# Global var on Levels on spatial and temporal axis
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']
def elasticsearch_query(query_fname, logger):
"""
Build an ES query and return a default dict with results
:return: tweetsByCityAndDate
"""
# Elastic search credentials
client = Elasticsearch("http://localhost:9200")
es_logger.setLevel(logging.WARNING)
index = "twitter"
# Define a Query
query = open(query_fname, "r").read()
result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
# Append all pages form scroll search : avoid the 10k limitation of ElasticSearch
results = avoid10kquerylimitation(result, client, logger)
# Initiate a dict for each city append all Tweets content
tweetsByCityAndDate = defaultdict(list)
for hits in results:
# parse Java date : EEE MMM dd HH:mm:ss Z yyyy
inDate = hits["_source"]["created_at"]
parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
try:  # geocoding may be bad
geocoding = hits["_source"]["rest"]["features"][0]["properties"]
except:
continue  # skip this iteration
if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
# localities do not necessarily have an associated state
try:
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no state in geocoding
try:
logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no city as well : only country
# print(json.dumps(hits["_source"], indent=4))
try: #
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except:
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str("none")
try:
tweetsByCityAndDate[cityStateCountry].append(
{
"tweet": preprocessTweets(hits["_source"]["full_text"]),
"created_at": parseDate
}
)
except:
print(json.dumps(hits["_source"], indent=4))
# biotexInputBuilder(tweetsByCityAndDate)
# pprint(tweetsByCityAndDate)
return tweetsByCityAndDate
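# --- Hedged usage sketch (assumes a local Elasticsearch index and a query file on disk) ---
# tweets_by_place = elasticsearch_query('queries/tweets_uk.json', logger)
# keys look like 'city_state_country'; values are lists of {'tweet', 'created_at'} dicts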
def avoid10kquerylimitation(result, client, logger):
"""
Elasticsearch limits query results to 10,000. To avoid this limit, we need to paginate results and scroll
This method appends all pages from the scroll search
:param result: a result of an Elasticsearch query
:return:
"""
scroll_size = result['hits']['total']["value"]
logger.info("Number of elasticsearch scroll: " + str(scroll_size))
results = []
# Progress bar
pbar = tqdm(total=scroll_size)
while (scroll_size > 0):
try:
scroll_id = result['_scroll_id']
res = client.scroll(scroll_id=scroll_id, scroll='60s')
results += res['hits']['hits']
scroll_size = len(res['hits']['hits'])
pbar.update(scroll_size)
except:
pbar.close()
logger.error("elasticsearch search scroll failed")
break
pbar.close()
return results
def preprocessTweets(text):
"""
1 - Clean up tweets text cf : https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1
2 - Detection lang
3 - remove stopword ??
:param text:
:return: list : texclean, and langue detected
"""
## 1 clean up tweets
# remove URLs
textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text)
textclean = re.sub(r'http\S+', '', textclean)
# remove usernames
# textclean = re.sub('@[^\s]+', '', textclean)
# remove the # in #hashtag
# textclean = re.sub(r'#([^\s]+)', r'\1', textclean)
return textclean
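# --- Hedged usage sketch (made-up tweet text) ---
# Only URLs are stripped here; the username/hashtag substitutions above are commented out.
# preprocessTweets("Stay safe https://example.org #covid")  # -> "Stay safe  #covid"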
def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
"""
Create a matrix of :
- line : (city,day)
- column : terms
- value of cells : TF (term frequency)
Help found here :
http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
:param tweetsofcity:
:param matrixAggDay_fout: file to save
:param matrixOccurence_fout: file to save
:return:
"""
# initiate matrix of tweets aggregate by day
# col = ['city', 'day', 'tweetsList', 'bow']
col = ['city', 'day', 'tweetsList']
matrixAggDay = pd.DataFrame(columns=col)
cityDayList = []
logger.info("start full_text concatenation for city & day")
pbar = tqdm(total=len(tweetsofcity))
for city in tweetsofcity:
# create a table with 2 columns : tweet and created_at for a specific city
matrix = pd.DataFrame(tweetsofcity[city])
# Aggregate list of tweets by single day for specifics cities
## Loop on days for a city
period = matrix['created_at'].dt.date
period = period.unique()
period.sort()
for day in period:
# aggregate city and date document
document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
# Bag of Words and preprocces
# preproccesFullText = preprocessTerms(document)
tweetsOfDayAndCity = {
'city': city,
'day': day,
'tweetsList': document
}
cityDayList.append(city + "_" + str(day))
try:
matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True)
except:
print("full_text empty after pre-process: "+document)
continue
pbar.update(1)
pbar.close()
if save_intermediaire_files:
logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
matrixAggDay.to_csv(matrixAggDay_fout)
# Count terms with sci-kit learn
cd = CountVectorizer(
stop_words='english',
#preprocessor=sklearn_vectorizer_no_number_preprocessor,
#min_df=2, # token at least present in 2 cities : reduce size of matrix
max_features=25000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#@]+', # remove user names, i.e. terms starting with @, for personal data issues
# strip_accents= "ascii" # remove token with special character (trying to keep only english word)
)
cd.fit(matrixAggDay['tweetsList'])
res = cd.transform(matrixAggDay["tweetsList"])
countTerms = res.todense()
# create matrix
## get terms :
# voc = cd.vocabulary_
# listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
listOfTerms = cd.get_feature_names()
##initiate matrix with count for each terms
matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
# save to file
if save_intermediaire_files:
logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
matrixOccurence.to_csv(matrixOccurence_fout)
return matrixOccurence
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return: filtered matrix
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
###Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
##period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
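# --- Hedged usage sketch (toy matrix; index follows the 'city_state_country_date' pattern) ---
# toy = pd.DataFrame({'virus': [3, 1, 2]},
#                    index=['London_England_UK_2020-01-23',
#                           'Glasgow_Scotland_UK_2020-01-23',
#                           'Paris_IDF_France_2020-01-23'])
# week = pd.date_range('2020-01-23', '2020-01-24')
# spatiotemporelFilter(toy, listOfcities=['London'], spatialLevel='city', period=week)  # keeps the London row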
def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
period='all', temporalLevel='day'):
"""
Aggregate on spatial and temporel and then compute TF-IDF
:param matrixOcc: Matrix with TF already compute
:param listOfcities: filter on this cities
:param spatialLevel: city / state / country / world
:param period: Filter on this period
:param temporalLevel: day / week (month have to be implemented)
:return:
"""
matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
spatialLevel='state', period=period)
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
if temporalLevel == 'day':
## In space
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("state").sum()
elif spatialLevel == 'country' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("country").sum()
elif temporalLevel == "week":
matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime
## in space and time
if spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum()
# Compute TF-IDF
## compute TF : for each doc, divide count by the sum of all counts
### Sum of all counts by row
matrixOcc['sumCount'] = matrixOcc.sum(axis=1)
### Divide each cell by these sums
listOfTerms = matrixOcc.keys()
matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0)
## Compute IDF : create a vector of length = nb of terms with IDF values
idf = pd.Series(index=matrixOcc.keys(), dtype=float)
### N : nb of documents <=> nb of rows :
N = matrixOcc.shape[0]
### DFt : nb of documents that contain the term
DFt = matrixOcc.astype(bool).sum(axis=0) # Tip : convert all values to boolean; float 0.0 will be False, others True
#### Use NaN when the value is 0, because otherwise the log would be infinite
DFt.replace(0, np.nan, inplace=True)
### compute log(N/DFt)
idf = np.log10(N / (DFt))
# idf = np.log10( N / (DFt * 10))
## compute TF-IDF
matrixTFIDF = matrixOcc * idf
# matrixTFIDF = matrixOcc * idf * idf
## remove terms if for all documents value are Nan
matrixTFIDF.dropna(axis=1, how='all', inplace=True)
# Save file
matrixTFIDF.to_csv(matrixHTFIDF_fname)
# Export N biggest TF-IDF score:
top_n = 500
extractBiggest = pd.DataFrame(index=matrixTFIDF.index, columns=range(0, top_n))
for row in matrixTFIDF.index:
try:
row_without_zero = matrixTFIDF.loc[row]# we remove term with a score = 0
row_without_zero = row_without_zero[ row_without_zero !=0 ]
try:
extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
except:
extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
except:
logger.debug("H-TFIDF: city "+str(matrixTFIDF.loc[row].name)+ "not enough terms")
extractBiggest.to_csv(biggestHTFIDFscore_fname+".old.csv")
# Transpose this table in order to share the same structure with the classical TF-IDF biggest score :
hbt = pd.DataFrame()
extractBiggest = extractBiggest.reset_index()
for index, row in extractBiggest.iterrows():
hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
hbtrow[spatialLevel] = row[spatialLevel]
hbtrow["date"] = row["date"]
hbt = hbt.append(hbtrow, ignore_index=True)
hbt.to_csv(biggestHTFIDFscore_fname)
def TFIDF_TF_with_corpus_state(elastic_query_fname, logger, save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved="./",
spatial_hiearchy="country", temporal_period='all', listOfCities='all'):
"""
Compute TFIDF and TF from an elastic query file
1 doc = 1 tweet
Corpus = by hierarchy level, i.e. state or country
:param elastic_query_fname: filename and path of the elastic query
:param logger: logger of the main program
:param nb_biggest_terms: How many biggest term are to keep
:param spatial_hiearchy: define the size of the corpus : state or country
:param temporal_period:
:param listOfCities: If you want to filter out some cities, you can
:return:
"""
# tfidfStartDate = date(2020, 1, 23)
# tfidfEndDate = date(2020, 1, 30)
# temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
# listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
tweets = elasticsearch_query(elastic_query_fname, logger)
if listOfCities == 'all':
listOfCities = []
listOfStates = []
listOfCountry = []
for triple in tweets:
splitted = triple.split("_")
listOfCities.append(splitted[0])
listOfStates.append(splitted[1])
listOfCountry.append(splitted[2])
listOfCities = list(set(listOfCities))
listOfStates = list(set(listOfStates))
listOfCountry = list(set(listOfCountry))
# reorganize tweets (dict : tweets by city) into a dataframe (city and date)
matrixAllTweets = pd.DataFrame()
for tweetByCity in tweets.keys():
# Filter cities :
city = str(tweetByCity).split("_")[0]
state = str(tweetByCity).split("_")[1]
country = str(tweetByCity).split("_")[2]
if city in listOfCities:
matrix = pd.DataFrame(tweets[tweetByCity])
matrix['city'] = city
matrix['state'] = state
matrix['country'] = country
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
if temporal_period != "all":
mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
# Compute TF-IDF and TF by state
extractBiggestTF_allstates = pd.DataFrame()
extractBiggestTFIDF_allstates = pd.DataFrame()
if spatial_hiearchy == "country":
listOfLocalities = listOfCountry
elif spatial_hiearchy == "state":
listOfLocalities = listOfStates
elif spatial_hiearchy == "city":
listOfLocalities = listOfCities
for locality in listOfLocalities:
matrix_by_locality = matrixAllTweets[matrixAllTweets[spatial_hiearchy] == locality]
vectorizer = TfidfVectorizer(
stop_words='english',
min_df=0.001,
# max_features=50000,
ngram_range=(1, 1),
token_pattern='[<KEY>',
)
# logger.info("Compute TF-IDF on corpus = "+spatial_hiearchy)
try:
vectors = vectorizer.fit_transform(matrix_by_locality['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
except:
logger.info("Impossible to compute TF-IDF on: "+locality)
continue
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
locality_format = locality.replace("/", "_")
locality_format = locality_format.replace(" ", "_")
if save_intermediaire_files:
logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
## Extract N TOP ranking score
extractBiggest = TFIDFClassical.max().nlargest(nb_biggest_terms)
extractBiggest = extractBiggest.to_frame()
extractBiggest = extractBiggest.reset_index()
extractBiggest.columns = ['terms', 'score']
extractBiggest[spatial_hiearchy] = locality
extractBiggestTFIDF_allstates = extractBiggestTFIDF_allstates.append(extractBiggest, ignore_index=True)
"""
# Compute TF
tf = CountVectorizer(
stop_words='english',
min_df=2,
ngram_range=(1,2),
token_pattern='[a-zA-Z0-9@#]+',
)
try:
tf.fit(matrix_by_locality['tweet'])
tf_res = tf.transform(matrix_by_locality['tweet'])
listOfTermsTF = tf.get_feature_names()
countTerms = tf_res.todense()
except:# locality does not have enough different term
logger.info("Impossible to compute TF on: "+locality)
continue
## matrixTF
TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
### save in file
logger.info("saving TF File: "+path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
TFClassical.to_csv(path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
## Extract N TOP ranking score
extractBiggestTF = TFClassical.max().nlargest(nb_biggest_terms)
extractBiggestTF = extractBiggestTF.to_frame()
extractBiggestTF = extractBiggestTF.reset_index()
extractBiggestTF.columns = ['terms', 'score']
extractBiggestTF[spatial_hiearchy] = locality
extractBiggestTF_allstates = extractBiggestTF_allstates.append(extractBiggestTF, ignore_index=True)
"""
logger.info("saving TF and TF-IDF top"+str(nb_biggest_terms)+" biggest score")
extractBiggestTF_allstates.to_csv(path_for_filesaved+"/TF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
extractBiggestTFIDF_allstates.to_csv(path_for_filesaved+"/TF-IDF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
def TFIDF_TF_on_whole_corpus(elastic_query_fname, logger, save_intermediaire_files, path_for_filesaved="./",
temporal_period='all', listOfCities='all'):
"""
Compute TFIDF and TF from an elastic query file
1 doc = 1 tweet
Corpus = the whole elastic query (filtering out cities that are not in listOfCities)
:param elastic_query_fname: filename and path of the elastic query
:param logger: logger of the main program
:param nb_biggest_terms: How many of the biggest terms to keep. It has to be greater than for H-TF-IDF or
classical TF-IDF on corpus = locality, because a lot of terms have 1.0 as the score
:param spatial_hiearchy: define the size of the corpus : state or country
:param temporal_period:
:param listOfCities: If you want to filter out some cities, you can
:return:
"""
# tfidfStartDate = date(2020, 1, 23)
# tfidfEndDate = date(2020, 1, 30)
# temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
# listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
# Query Elasticsearch to get all tweets from UK
tweets = elasticsearch_query(elastic_query_fname, logger)
if listOfCities == 'all':
listOfCities = []
listOfStates = []
listOfCountry = []
for triple in tweets:
splitted = triple.split("_")
listOfCities.append(splitted[0])
listOfStates.append(splitted[1])
listOfCountry.append(splitted[2])
listOfCities = list(set(listOfCities))
listOfStates = list(set(listOfStates))
listOfCountry = list(set(listOfCountry))
# reorganize tweets (dict : tweets by city) into a dataframe (city and date)
matrixAllTweets = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import math
import time
import pandas as pd
import sys
import matplotlib.pyplot as plt
train_path = sys.argv[1]+'train_data_shuffled.csv'
test_path = sys.argv[1]+'public_test.csv'
output_path = sys.argv[2]
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 20}
plt.rc('font', **font)
plt.rcParams['figure.figsize'] = (15,15)
start = time.time()
#test_labels_path = sys.argv[1]+'toy_dataset_test_labels.csv'
class NeuralNet:
def __init__(self,seed):
seed = seed
np.random.seed(seed)
class Layer:
def __init__(self, in_size, out_size):
#optimizer hyperparameters
self.gamma = 0.9
self.v = 0
self.nv = 0
self.ngamma = 0.9
self.beta = 0.9
self.eps = 10**(-8)
self.E = 0
self.am = 0
self.av = 0
self.beta1 = 0.9
self.beta2 = 0.99
self.epsa = 10**(-8)
self.nam = 0
self.nav = 0
self.nbeta1 = 0.9
self.nbeta2 = 0.99
self.epsna = 10**(-8)
self.in_size = in_size # dim(x_l) = m
self.out_size = out_size # dim(y_l) = n
# Xavier Initialization of weights for a layer # dim(w_l) = (m+1)*n
self.w = np.float32(np.random.normal(0,1,size=(in_size+1, out_size)) * np.sqrt(2/(in_size + out_size + 1)))
self.w = np.float64(self.w)
def forward(self, input,lr_mode): # forward pass
input = np.insert(input,0,np.ones(input.shape[0]),axis=1) # append 1 to input
self.input = input
if lr_mode == 2:
self.w = self.w - self.ngamma*self.nv
#print(input.shape,self.w.shape)
return np.dot(input,self.w)
def backward(self, out_error, learning_rate,lr_mode,iter_num): # backward pass
w_temp = np.delete(self.w,0,0) # remove first row (bias) from w
in_error = np.dot(out_error, w_temp.T)
w_error = np.dot(self.input.T, out_error)
if lr_mode == 1: # momentum
self.v = self.gamma*self.v+learning_rate*w_error
self.w -= self.v
elif lr_mode == 2: # nesterov
self.nv = self.ngamma*self.nv+learning_rate*w_error
self.w -= self.nv
elif lr_mode == 3: # rmsprop
self.E = self.beta*self.E + (1-self.beta)*(w_error**2)
self.w -= learning_rate*w_error/(np.sqrt(self.eps+self.E))
elif lr_mode == 4: # adam
self.am = self.beta1*self.am + (1-self.beta1)*(w_error)
am = self.am/(1-self.beta1**iter_num)
self.av = self.beta2*self.av + (1-self.beta2)*(w_error**2)
av = self.av/(1-self.beta2**iter_num)
#print(self.am.shape,self.av.shape)
self.w -= learning_rate*(am/np.sqrt(self.epsa+av))
elif lr_mode == 5: # nadam
self.nam = self.nbeta1*self.nam + (1-self.nbeta1)*(w_error)
nam = self.nam/(1-self.nbeta1**iter_num)
self.nav = self.nbeta2*self.nav + (1-self.nbeta2)*(w_error**2)
nav = self.nav/(1-self.nbeta2**iter_num)
grad_upd = self.nbeta1*nam + (1-self.nbeta1)*(w_error)/(1-self.nbeta1**iter_num)
self.w -= learning_rate*(grad_upd)/(np.sqrt(self.epsna+nav))
else:
self.w -= learning_rate*w_error
return in_error
########################################################################################################
class ActivationLayer:
def __init__(self, act_fun, act_prime):
self.act_fun = act_fun #activation function
self.act_prime = act_prime #derivative of activation function
def forward(self, input,lr_mode):
self.input = input
#print(self.act_fun(input))
return self.act_fun(input) #activation of linear input
def backward(self, out_error,lr,lr_mode,iter_num):
return np.multiply(out_error, self.act_prime(self.input)) #derivative w.r.t. activation function
########################################################################################################
class SoftmaxLayer:
def __init__(self, in_size):
self.in_size = in_size
def forward(self, input,lr_mode):
self.input = input
v = np.amax(input,axis=1,keepdims=True)
#print(input-v)
tmp = np.exp(input-v)
tmp = tmp / np.sum(tmp,axis=1,keepdims=True)
self.output = tmp
return self.output
def backward(self, out_error,lr,lr_mode,iter_num):
return out_error
########################################################################################################
###################################################################################################
# Some activation functions and their derivatives
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
z = sigmoid(x)
return z*(1-z)
def tanh(x):
return np.tanh(x)
def tanh_prime(x):
return 1 - (np.tanh(x))**2
def relu(x):
return np.maximum(x, 0)
def relu_prime(x):
return np.array(x >= 0).astype('int')
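# --- Hedged sanity-check sketch: compare sigmoid_prime against a central finite difference ---
# x, h = np.array([-1.0, 0.0, 2.0]), 1e-6
# np.allclose(sigmoid_prime(x), (sigmoid(x + h) - sigmoid(x - h)) / (2 * h))  # -> True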
###################################################################################################
#Some Error functions and their derivatives
def mse(pred, y):
return np.mean(np.power(pred - y, 2))
def mse_prime(pred, y):
return (pred - y) / y.shape[0]
def cee(pred,y):
#pred = np.clip(pred,a_min=10**(-15),a_max=10**15)
v = np.log(np.sum(pred*y,axis=1,keepdims=True))
return abs(np.sum(v)/y.shape[0])
def cee_prime(pred,y):
return (pred-y)/y.shape[0]
###################################################################################################
# mapping from int to loss functions/activation functions
loss_fun_dict = {
0:cee,
1:mse
}
loss_fun_prime_dict = {
0:cee_prime,
1:mse_prime
}
act_fun_dict = {
0:sigmoid,
1:tanh,
2:relu
}
act_fun_prime_dict = {
0:sigmoid_prime,
1:tanh_prime,
2:relu_prime
}
opt_method_dict = {
0:'Vanilla SGD',
1:'Momentum',
2:'Nesterov',
3:'RMSProp',
4:'Adam',
5:'Nadam'
}
act_dict = {
0:'log-sigmoid',
1:'tanh',
2:'ReLU'
}
###################################################################################################
def accuracy(x,y):
x = np.argmax(x,axis=1)
y = np.argmax(y,axis=1)
c=0
for i in range(x.shape[0]):
if x[i]==y[i]:
c+=1
return c/x.shape[0]
def pred_model(net,X_test,Y_test=None):
output = X_test
for layer in net: #forward pass
output = layer.forward(output, 0)  # forward() also expects an lr_mode flag; 0 = no optimizer-specific adjustment
error = cee(output,Y_test) #error calculation
acc = accuracy(output,Y_test)
print(error,acc)
return np.argmax(output,axis=1)
###################################################################################################
def train_model(net,X_train,Y_train,X_test,Y_test,epochs,batchsize,lr,lr_strat,loss_fun,lr_mode=0,details=False,interval=1):
batchnum = X_train.shape[0]//batchsize
lr_ = lr
#lr_mode = 0
errs = []
for i in range(epochs):
err=0
if lr_strat==1:
lr_ = lr/np.sqrt(i+1)
for n in range(batchnum):
mini_x_train = X_train[n*batchsize:(n+1)*batchsize]
mini_y_train = Y_train[n*batchsize:(n+1)*batchsize]
iter_num = batchnum*i+n+1
#print(iter_num)
output = mini_x_train
for layer in net: #forward pass
output = layer.forward(output,lr_mode)
#print(output)
err += loss_fun_dict[loss_fun](output,mini_y_train) #error calculation
out_error = loss_fun_prime_dict[loss_fun](output,mini_y_train) #derivative of error
for layer in reversed(net): #backward pass
out_error = layer.backward(out_error,lr_,lr_mode,iter_num)
if details:
output1 = X_train
for layer in net: #forward pass
output1 = layer.forward(output1,lr_mode)
error = loss_fun_dict[loss_fun](output1,Y_train) #error calculation
output = X_test #final prediction
for layer in net:
output = layer.forward(output,lr_mode)
test_acc = accuracy(output,Y_test)
errs.append(test_acc)
if (i+1)%interval==0:
print(i+1,error,accuracy(output1,Y_train),test_acc)
return errs
###################################################################################################
df = pd.read_csv(train_path,header=None)
test_df = pd.read_csv(test_path,header=None)
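# --- Illustrative sketch (editor's addition): how the pieces above are typically wired
# together. `FCLayer` and `ActivationLayer` are assumed to be the layer classes defined
# earlier in this file (not shown here), and df/test_df are assumed to hold flattened
# features with one-hot encoded labels prepared elsewhere.
def _demo_training_run(X_train, Y_train, X_test, Y_test):
    net = [FCLayer(X_train.shape[1], 128), ActivationLayer(2),      # assumed layer classes; 2 -> ReLU
           FCLayer(128, Y_train.shape[1]), SoftmaxLayer(Y_train.shape[1])]
    errs = train_model(net, X_train, Y_train, X_test, Y_test,
                       epochs=20, batchsize=64, lr=0.01, lr_strat=1,
                       loss_fun=0, lr_mode=0, details=True, interval=5)
    return errs, pred_model(net, X_test, Y_test)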
import argparse
import os
import pandas as pd
from decimal import Decimal
from util.filesystem import ensure_file, read_cache_pickle, write_cache_pickle
from util.pre_processing import get_system_calls_metadata, drop_duplicates, get_system_calls
class FrequencyVectorPreProcessor:
def __init__(self, input_file: str, args):
self.input = input_file
self.input_filename = os.path.basename(self.input).split('.')[0] # Get file name without extension
self.delta_t = Decimal(args.delta_t) / Decimal(1000) # convert to ms
self.drop_duplicates_mode = args.drop_duplicates_mode
self.id = f'{self.get_static_id(args)}_{self.input_filename}'
def pre_process(self):
cached_df = read_cache_pickle(self.id)
if cached_df is not None:
print(f'[+] Bags: {len(cached_df.index)}')
return cached_df
if not ensure_file(self.input):
raise Exception(f'Input file does not exist: {self.input}')
df = self.create_dataframe()
df, duplicates_dropped = drop_duplicates(df=df, mode=self.drop_duplicates_mode)
print(f'[+] DataFrame created (rows={len(df)}, duplicates_dropped={duplicates_dropped})')
write_cache_pickle(self.id, df=df)
return df
def create_dataframe(self) -> pd.DataFrame:
num_calls, unique_calls = get_system_calls_metadata(self.input)
print(f'[+] System calls: {num_calls}')
print(f'[+] Unique calls: {len(unique_calls)}')
bags = self.create_bags(unique_syscalls=unique_calls)
print(f'[+] Bags: {len(bags)}')
        return pd.DataFrame(bags)
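# --- Illustrative sketch (editor's addition): constructing the pre-processor. The
# Namespace fields mirror the attributes read in __init__ (delta_t is given in ms and the
# drop_duplicates_mode value is a placeholder); get_static_id() and create_bags() are
# assumed to be defined in the remainder of this class, which is not shown here.
def _demo_frequency_vector_preprocessing(trace_file: str) -> pd.DataFrame:
    args = argparse.Namespace(delta_t='100', drop_duplicates_mode='none')
    pre_processor = FrequencyVectorPreProcessor(input_file=trace_file, args=args)
    return pre_processor.pre_process()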
import streamlit as st
from PIL import Image
import time
import pandas as pd
import base64
from modules import (ensemble, utility)
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import warnings
from statsmodels.tsa.seasonal import seasonal_decompose
warnings.filterwarnings('ignore')
############################# Function require to download information later on
def download_link(object_to_download, download_filename, download_link_text):
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
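# Example (editor's addition): download_link only *returns* an HTML anchor string, so it has
# to be rendered explicitly, e.g.
# st.markdown(download_link(df, 'forecast.csv', 'Download forecast'), unsafe_allow_html=True)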
############################# Initial set up of sidebar and upload
df = pd.DataFrame()
st.set_page_config(page_title='emergency predict', layout='wide', initial_sidebar_state='auto')
st.image(Image.open('images/emergency logo.png'), width = 400)
st.sidebar.markdown("Open Source NHS Data Science Library")
# General page formatting and set-up
uploaded_file = st.sidebar.file_uploader("Choose a file",type=('xlsx','csv'))
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file) if uploaded_file.name.endswith('.csv') else pd.read_excel(uploaded_file)
if 'ds' not in df:
st.warning("Please name your target date column ds within your uploaded data to continue")
st.stop()
df = df.set_index('ds')
target = st.sidebar.selectbox('Choose column you would like to forecast',df.select_dtypes(include=['int16', 'int32', 'int64', 'float16', 'float32', 'float64']).columns.tolist(), help = 'The programme will automatically find columns that can be forecasted, just select from this list when you have imported a dataset')
st.sidebar.text(''' ''')
hori = st.sidebar.number_input("Choose your forecast horizon",value = 1, min_value = 1, max_value=999, step = 0, help = 'The horizon refers to how many units you want to forecast, i.e. if you want to forecast 7 days this number would be 7')
st.sidebar.text(''' ''')
ram = st.sidebar.selectbox('Choose forecast model',['ensemble','individualised'], help = 'Picking ensemble will combine the models to produce 1 forecast, individualised will split these out into 3 forecasts')
st.sidebar.text(''' ''')
pi = st.sidebar.selectbox('Pick Prediction Intervals',['90%','80%','60%'], help ='With a 90% prediction interval, the graph will plot an upper and lower line to represent where 90% of the data will fall')
st.sidebar.text(''' ''')
crossval = st.sidebar.selectbox("Include the Cross Validation results", ['No', 'Yes'], help ='Cross validation will show you the forecasts performance. However, it can be a lengthly process (typically 2-3 minutes), so if you are already happy with your model you may choose to choose No to avoid running it.')
st.sidebar.text(''' ''')
r = st.sidebar.button('Run Forecast Model')
st.markdown('The emergency forecast tool is an open source application to predict ambulance service demand. Built in collaboration with the University of Exeter and referenced in the paper titled: Forecasting the daily demand for emergency medical ambulances in England and Wales: A benchmark model and external validation. By <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.')
############################# beginning of the data input section
if uploaded_file is None:
st.warning("Please upload a .csv or .xlsx file to begin forecasting")
else:
st.info("Data loaded! Begin forecasting by clicking Run Forecast Model in the Banner bar")
if r == True:
    if ram == 'ensemble':
        ram = False      # return a single combined (ensemble) forecast
    else:
        ram = True       # individualised: return each model's forecast separately
if pi == '90%':
pi = 0.1
elif pi == '80%':
pi = 0.2
elif pi == '60%':
pi = 0.4
############################# beginning of the data input section
with st.beta_expander("Show Input Data"):
st.markdown('''This is an opportunity to review your input data to ensure that NaN or egregious values do not contaminate your predictions. Clicking on the legend icons will allow you to filter out metrics. There is a date slider along the bottom should you wish to concentrate on a particular date period. Finally, in the top right corner you can choose to enter full screen mode, zoom, download, toggle tooltips and add sparklines. To restart your predictions click on the x symbol on the left hand pane.''')
if uploaded_file is not None:
with st.spinner('Uploading data'):
df_input = df.reset_index(drop=False)
n = df_input.select_dtypes(include=['int16','int32','int64','float16','float32','float64'])
fig = make_subplots(
rows=2, cols=1,
shared_xaxes=True,
vertical_spacing=0.06,
specs=[[{"type": "table"}], [{"type": "scatter"}]]
)
for i in n.columns:
fig.add_trace(
go.Scatter(x=df_input["ds"],y=df_input[i], mode="lines", name=i),row=2, col=1)
fig.add_trace(
go.Table(
header=dict(
values=df_input.columns,
font=dict(size=10),
fill_color = 'rgba(19,24,31,0.5)',
line_color = 'rgba(255,255,255,0.2)',
font_color = '#F2F2F2',
align="left"
),
cells=dict(
values=[df_input[k].tolist() for k in df_input.columns],
align = "left",
fill_color = 'rgba(47,58,75,0.5)',
line_color = 'rgba(255,255,255,0.2)',
font_color = '#F2F2F2',)
),
row=1, col=1
)
fig.update_xaxes(color='#F2F2F2', gridcolor = 'rgba(255,255,255,0.2)')
fig.update_yaxes(color='#F2F2F2', gridcolor = 'rgba(255,255,255,0.2)')
fig.update_layout(
height=1000,
showlegend=True,
title_text="Time Series Analysis",
paper_bgcolor='rgba(34,42,55,1)',
title_font_color='rgba(255,255,255,1)',
modebar_color='rgba(255,255,255,1)',
plot_bgcolor='rgba(47,58,75,0.5)',
legend_font_color='rgba(255,255,255,1)',
colorway=['#E29D89','#46AFF6','#096F64','#3993BA','#02DAC5','#FC5523','#CF6679'],
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
rangeslider=dict(
visible=True
),
type="date"
)
)
st.plotly_chart(fig, use_container_width=True)
with st.spinner('Data Loading Complete!'):
time.sleep(1)
############################# Beginning of the forecast model section
with st.spinner('Processing Forecast Model'):
with st.beta_expander("Forecast Model"):
            st.markdown('Our chosen benchmark method, based on performance, is an ensemble (a simple average) of Facebook\'s Prophet and Regression with ARIMA errors. Both methods are flexible enough to add in special calendar events such as national holidays. In our model we chose to include New Year\'s Day as this clearly stood out in the time series. In our regression model, we model the error process using the same ARIMA model - (1, 1, 3)(1, 0, 1, 7) - for each sub-region. EMS providers in different regions may wish to experiment with alternative error processes.')
st.markdown('Our cross-validation demonstrated that performance of the ensemble was superior to either method on its own, the other candidate models and a naive benchmark. However, we note that Prophet is also a reasonable choice for ambulance trusts new to forecasting (albeit they should recognise the shortcomings in terms of coverage). We emphasise the critical importance of a naive benchmark such as seasonal naïve in cross-validation to confirm that more complex models add value. We found that over our forecast horizon seasonal naive outperformed several state-of-the-art forecasting techniques.')
            st.markdown("Read the full study: https://osf.io/a6nu5")
if uploaded_file is not None:
                st.markdown("This section shows the processed forecast model using the fbprophet and Linear Regression with ARIMA errors ensemble. You have similar filter options to the first section. Within this view yhat represents the forecast, and your target variable's historical values are plotted adjacent to it. Null values will display where the outputs do not overlap.")
fcstdf = df
fcstdf.index.freq = 'D'
model = utility.default_ensemble()
model.fit(fcstdf[target])
forecast_frame = model.predict(horizon=hori, return_all_models = ram, alpha = pi)
result = pd.concat([fcstdf[target], forecast_frame], axis=1)
fig = make_subplots(rows=2, cols=1,shared_xaxes=True,vertical_spacing=0.06, specs=[[{"type": "table"}], [{"type": "scatter"}]])
result.reset_index(drop=False, inplace = True)
result['ds'] = pd.to_datetime(result['ds'])
result['ds'] = result['ds'].dt.date
resultnumerical = result.select_dtypes(include=['int16','int32','int64','float16','float32','float64'])
for i in resultnumerical.columns:
fig.add_trace(
go.Scatter(x=result['ds'],y=result[i], mode="lines", name=i),row=2, col=1)
fig.add_trace(
go.Table(
header=dict(
values=result.columns,
font=dict(size=10),
fill_color = 'rgba(19,24,31,0.5)',
line_color = 'rgba(255,255,255,0.2)',
font_color = '#F2F2F2',
align="left"
),
cells=dict(
values=[result[k].tolist() for k in result.columns],
align = "left",
fill_color = 'rgba(47,58,75,0.5)',
line_color = 'rgba(255,255,255,0.2)',
font_color = '#F2F2F2',)
),
row=1, col=1
)
fig.update_xaxes(color='#F2F2F2', gridcolor = 'rgba(255,255,255,0.2)')
fig.update_yaxes(color='#F2F2F2', gridcolor = 'rgba(255,255,255,0.2)')
fig.update_layout(
height=1000,
showlegend=True,
title_text="Ensemble Forecast Model",
paper_bgcolor='rgba(34,42,55,1)',
title_font_color='rgba(255,255,255,1)',
modebar_color='rgba(255,255,255,1)',
plot_bgcolor='rgba(47,58,75,0.5)',
legend_font_color='rgba(255,255,255,1)',
colorway=['#E29D89','#46AFF6','#096F64','#3993BA','#02DAC5','#FC5523','#CF6679'],
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
rangeslider=dict(
visible=True
),
type="date"
)
)
st.plotly_chart(fig, use_container_width=True)
                if st.button('Save this forecast'):
                    tmp_download_link = download_link(result, 'forecastoutput.csv', 'Click here to download data!')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)
with st.spinner('Forecast Model Built!'):
time.sleep(1)
############################# beginning of the decomposition section
with st.spinner('Processing Decomposition'):
with st.beta_expander("View Decomposition"):
            st.markdown('Decomposition shows a breakdown of the core components of the forecast model: in this application these are seasonality (a pattern in a time series that repeats in a regular way), trend (the growth, either positive or negative, over time) and the residual.')
st.markdown('https://www.oxfordreference.com/view/10.1093/acref/9780199541454.001.0001/acref-9780199541454')
if uploaded_file is not None:
st.markdown('''Seasonal decomposition aims to break down the individualised components that made up the above forecasts. For more information view the statsmodels seasonal decomposition page.''')
sd = df
sd.reset_index(drop = False, inplace = True)
sd['ds'] = pd.to_datetime(sd['ds'])
sd = sd.set_index('ds')
sd.index.freq = 'D'
                sdresult = seasonal_decompose(sd[target], model='additive')
it = ['resid','seasonal','trend']
fig = make_subplots(
rows=3, cols=1,
shared_xaxes=True,
vertical_spacing=0.06,
specs=[[{"type": "scatter"}],[{"type": "scatter"}],[{"type": "scatter"}]] #need to enumerate to create the necessary row plots
)
fig.add_trace(go.Scatter(x=sdresult.resid.index, y=sdresult.resid, mode="lines", name='residual'),row=1, col=1)
fig.add_trace(go.Scatter(x=sdresult.seasonal.index, y=sdresult.seasonal, mode="lines", name='seasonal'),row=2, col=1)
fig.add_trace(go.Scatter(x=sdresult.trend.index, y=sdresult.trend, mode="lines", name='trend'),row=3, col=1)
fig.update_xaxes(color='#F2F2F2', gridcolor = 'rgba(255,255,255,0.2)')
fig.update_yaxes(color='#F2F2F2', gridcolor = 'rgba(255,255,255,0.2)')
fig.update_layout(
height=1000,
showlegend=True,
title_text="Series Decomposition",
paper_bgcolor='rgba(34,42,55,1)',
title_font_color='rgba(255,255,255,1)',
modebar_color='rgba(255,255,255,1)',
plot_bgcolor='rgba(47,58,75,0.5)',
legend_font_color='rgba(255,255,255,1)',
colorway=['#E29D89','#46AFF6','#096F64','#3993BA','#02DAC5','#FC5523','#CF6679'],
)
st.plotly_chart(fig, use_container_width=True)
with st.spinner('Decomposition Built!'):
time.sleep(1)
############################# Beginning of the Cross Validation Section
with st.beta_expander("Cross Validation"):
        st.markdown('A method of assessing the accuracy and validity of a statistical model. The available data are divided into two parts. Modelling of the data uses one part only. The model selected for this part is then used to predict the values in the other part of the data, a valid model should show good predictive accuracy. In this cross validation the measure is mean absolute error (MAE).')
st.markdown('https://www.oxfordreference.com/view/10.1093/acref/9780199541454.001.0001/acref-9780199541454')
if uploaded_file is not None:
if crossval == 'No':
st.markdown("Change Cross Validation to Yes to see results")
else:
with st.spinner("Processing Cross Validation"):
cvdf = df
cvdf.reset_index(drop = False, inplace = True)
cvdf['ds'] = pd.to_datetime(cvdf['ds'])
cvdf = cvdf.set_index('ds')
if cvdf.shape[0] - 168 <= 0:
st.warning("You need to provide more training data to enable cross validation")
else:
max_horizon = 84
min_train_size = cvdf.shape[0] - 168
horizons = [day for day in range(7, max_horizon+7,7)]
max_horizon = max(horizons)
naive_df = ensemble.rolling_forecast_origin(cvdf[target], min_train_size = min_train_size, horizon = max_horizon, step=7)
naive = ensemble.cross_validation_score(ensemble.SNaive(7),
naive_df,
horizons = horizons,
metric = ensemble.mean_absolute_error,
n_jobs=-1)
mae = ensemble.ensemble_cross_val_score(model = utility.default_ensemble(),
data = cvdf[target],
horizons = horizons,
metric = ensemble.mean_absolute_error,
min_train_size=min_train_size
)
naiveresult = pd.DataFrame(naive, columns=horizons)
                        ensembleresult = pd.DataFrame(mae, columns=horizons)
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = pd.Series([0., 23.06066166, 5.], index=times)
expected_module = pd.Series([0., 21.56066166, 5.], index=times)
assert_series_equal(expected_cell, cell_temps)
assert_series_equal(expected_module, module_temps)
def test_pvsyst_cell_default():
result = temperature.pvsyst_cell(900, 20, 5)
assert_allclose(result, 45.137, 0.001)
def test_pvsyst_cell_kwargs():
result = temperature.pvsyst_cell(900, 20, wind_speed=5.0, u_c=23.5,
u_v=6.25, eta_m=0.1)
assert_allclose(result, 33.315, 0.001)
def test_pvsyst_cell_ndarray():
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
result = temperature.pvsyst_cell(irrads, temps, wind_speed=winds)
expected = np.array([0.0, 23.96551, 5.0])
assert_allclose(expected, result, 3)
def test_pvsyst_cell_series():
times = pd.date_range(start="2015-01-01", end="2015-01-02", freq="12H")
temps = pd.Series([0, 10, 5], index=times)
    irrads = pd.Series([0, 500, 0], index=times)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Original tutorial came from https://www.youtube.com/watch?v=XQgXKtPSzUI
Author:
<NAME>
License:
MIT License
"""
import argparse
import datetime
import logging
import os
from typing import List, Iterable
from bs4.element import ResultSet, Tag
from bs4 import BeautifulSoup
from selenium import webdriver
import chromedriver_binary # Required import to give chromedriver PATH
import time
import pandas as pd
from collections import Counter
from selenium.common.exceptions import (
NoSuchElementException,
ElementClickInterceptedException,
StaleElementReferenceException,
)
def getArgs():
"""This method reads the arguments passed during using the command line interface.
Returns:
Dictionary of arguments passed
"""
parser = argparse.ArgumentParser(
description="Run Crawl Script to parse Hackathon.com website"
)
parser.add_argument(
"-y",
"--year",
help="Crawl for one particular year. Default is from current year to 5"
"years before",
)
parser.add_argument(
"-c",
"--country",
help="Crawl for another particular country. Default is Germany",
)
argvars = vars(parser.parse_args())
return argvars
def crawl_url(url: str) -> ResultSet(Tag):
"""This method uses Selenium WebDriver to run an automated Chrome Browser and crawl the page.
This is required due to the Javascript in the Hackathon website which needs XHR requests
to show all events in the year.
Args:
url: URL to be crawled
Returns:
BeautifulSoup ResultSet with relevant Page Source to be processed
Raises:
NoSuchElementException: When it can't find the More button anymore (Only for Debugging)
StaleElementReferenceException: Exception raised when the More button is not seen in window
"""
# Use selenium WebDriver to run an automated Chrome Browser.
# This is required due to the Javascript in the Hackathon website which needs XHR requests
# to show all events in the year.
driver = webdriver.Chrome()
driver.get(url)
# TODO: Use more efficient method for waiting.
time.sleep(2)
scroll_down(driver)
try:
more_button_xpath = "/html/body/div[6]/div[3]/div[3]/a"
more_button = driver.find_element_by_xpath(more_button_xpath)
except NoSuchElementException:
more_button_xpath = "/html/body/div[6]/div[2]/div[3]/a"
more_button = driver.find_element_by_xpath(more_button_xpath)
while True:
scroll_down(driver)
# TODO: Use more efficient method for waiting
time.sleep(0.7)
# TODO: optimize Try-Except
try:
driver.find_element_by_xpath(more_button_xpath)
except NoSuchElementException as e:
logging.debug(e)
break
try:
more_button.click()
except StaleElementReferenceException as e:
logging.error(e)
# Parse the read client by creating a BS4 object
s_page = BeautifulSoup(driver.page_source, "html.parser")
driver.quit()
# We find the elements at the right side of the page
container = s_page.find_all("div", {"class": ["ht-eb-card__right"]})
# "row ht-idt-card__right__container"]})
return container
def scroll_down(driver: webdriver.Chrome) -> None:
"""A method for scrolling the page.
Original code from https://stackoverflow.com/questions/48850974/selenium-scroll-to-end-of-page-indynamically-loading-webpage/48851166
# HACK: Due to JavaScript issue where the More Button cannot be clicked unless seen on window
Args:
driver: The Web Driver to run Chrome
"""
# Get scroll height.
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to the bottom.
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load the page.
time.sleep(1.5)
# Calculate new scroll height and compare with last scroll height.
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
def parse_keywords(container: ResultSet(Tag), year: int) -> pd.DataFrame:
"""This method parses all the keywords that had shown up on the page.
Args:
year: year of the parsed url for dataframe.
container: BeautifulSoup ResultSet with relevant Page Source to be processed
Returns:
A sorted Dataframe-Object with keywords, the year and its number of occurrences.
"""
keyword_list = [] # Type: str
final_list = [] # Type: Any
for tag in container:
tag_link_list = tag.find_all("a", {"class": "ht-card-tag"})
for tag_link in tag_link_list:
keyword_list.append(tag_link.contents[0])
for k, v in Counter(keyword_list).items():
final_list.append([year, k, v])
data_frame = pd.DataFrame(final_list, columns=["Year", "Tag", "Count"])
return data_frame
def parse_cities(container: ResultSet(Tag), year: int) -> pd.DataFrame:
"""This method parses all the cities that had shown up on the page.
Args:
year: year of the parsed url for dataframe.
container: BeautifulSoup ResultSet with relevant Page Source to be processed
Returns:
A sorted Dataframe-Object with city names, the year and its number of occurrences.
"""
keyword_list = [] # Type: str
final_list = [] # Type: Any
for tag in container:
tag_link_list = tag.find_all("span", {"class": "ht-eb-card__location__place"})
for tag_link in tag_link_list:
keyword_list.append(tag_link.contents[0])
for k, v in Counter(keyword_list).items():
final_list.append([year, k, v])
    data_frame = pd.DataFrame(final_list, columns=["Year", "City", "Count"])
    return data_frame
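# --- Illustrative sketch (editor's addition): typical end-to-end use of the helpers above.
# The URL layout is an assumption based on the argparse help text (country + year).
def _demo_crawl(country: str = "germany", year: int = 2020):
    url = f"https://www.hackathon.com/country/{country}/{year}"      # assumed URL pattern
    container = crawl_url(url)
    return parse_keywords(container, year), parse_cities(container, year)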
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Defines the ForecastModel class, which encapsulates model functions used in
forecast model fitting, as well as their number of parameters and
initialisation parameters.
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import itertools
import logging
import numpy as np
import pandas as pd
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, \
MO, nearest_workday, next_monday, next_monday_or_tuesday, \
GoodFriday, EasterMonday, USFederalHolidayCalendar
from pandas.tseries.offsets import DateOffset
from datetime import datetime
# -- Private Imports
from anticipy import model_utils
# -- Globals
logger = logging.getLogger(__name__)
# Fourier model configuration
_dict_fourier_config = { # Default configuration for fourier-based models
'period': 365.25, # days in year
'harmonics': 10 # TODO: evaluate different harmonics values
}
_FOURIER_PERIOD = 365.25
_FOURIER_HARMONICS = 10 # TODO: evaluate different harmonics values
_FOURIER_K = (2.0 * np.pi / _FOURIER_PERIOD)
_FOURIER_I = np.arange(1, _FOURIER_HARMONICS + 1)
_FOURIER_DATE_ORIGIN = datetime(1970, 1, 1)
# -- Functions
# ---- Utility functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def _get_f_init_params_default(n_params):
# Generate a default function for initialising model parameters: use
# random values between 0 and 1
return lambda a_x=None, a_y=None, a_date=None, is_mult=False:\
np.random.uniform(low=0.001, high=1, size=n_params)
def _get_f_bounds_default(n_params):
# Generate a default function for model parameter boundaries. Default
# boundaries are (-inf, inf)
return lambda a_x=None, a_y=None, a_date=None: (
n_params * [-np.inf], n_params * [np.inf])
def _get_f_add_2_f_models(forecast_model1, forecast_model2):
# Add model functions of 2 ForecastModels
def f_add_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=False,
**kwargs) +
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=False,
**kwargs))
return f_add_2_f_models
def _get_f_mult_2_f_models(forecast_model1, forecast_model2):
# Multiply model functions of 2 ForecastModels
def f_mult_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=True,
**kwargs) *
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=True,
**kwargs))
return f_mult_2_f_models
def _get_f_add_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# addition
def f_add_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=False),
f_init_params2(a_x, a_y, a_date, is_mult=False)])
return f_add_2_f_init_params
def _get_f_mult_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# multiplication
def f_mult_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=True),
f_init_params2(a_x, a_y, a_date, is_mult=True)])
return f_mult_2_f_init_params
def _get_f_concat_2_bounds(forecast_model1, forecast_model2):
# Compose parameter boundary functions of 2 ForecastModels
def f_add_2_f_bounds(a_x, a_y, a_date=None):
return np.concatenate(
(forecast_model1.f_bounds(
a_x, a_y, a_date), forecast_model2.f_bounds(
a_x, a_y, a_date)), axis=1)
return f_add_2_f_bounds
def _f_validate_input_default(a_x, a_y, a_date):
# Default input validation function for a ForecastModel. Always returns
# True
return True
def _as_list(l):
return l if isinstance(l, (list,)) else [l]
# Functions used to initialize cache variables in a ForecastModel
def _f_init_cache_a_month(a_x, a_date):
return a_date.month - 1
def _f_init_cache_a_weekday(a_x, a_date):
return a_date.weekday
def _f_init_cache_a_t_fourier(a_x, a_date):
# convert to days since epoch
t = (a_date - _FOURIER_DATE_ORIGIN).days.values
i = np.arange(1, _FOURIER_HARMONICS + 1)
a_tmp = _FOURIER_K * i.reshape(i.size, 1) * t
y = np.concatenate([np.sin(a_tmp), np.cos(a_tmp)])
return y
# Dictionary to store functions used to initialize cache variables
# in a ForecastModel
# This is shared across all ForecastModel instances
_dict_f_cache = dict(
a_month=_f_init_cache_a_month,
a_weekday=_f_init_cache_a_weekday,
a_t_fourier=_f_init_cache_a_t_fourier
)
# -- Classes
class ForecastModel:
"""
Class that encapsulates model functions for use in forecasting, as well as
their number of parameters and functions for parameter initialisation.
A ForecastModel instance is initialized with a model name, a number of
model parameters, and a model function. Class instances are
callable - when called as a function, their internal model function is
used. The main purpose of ForecastModel objects is to generate predicted
values for a time series, given a set of parameters. These values can be
compared to the original series to get an array of residuals::
y_predicted = model(a_x, a_date, params)
residuals = (a_y - y_predicted)
This is used in an optimization loop to obtain the optimal parameters for
the model.
The reason for using this class instead of raw model functions is that
ForecastModel supports function composition::
model_sum = fcast_model1 + fcast_model2
# fcast_model 1 and 2 are ForecastModel instances, and so is model_sum
a_y1 = fcast_model1(
a_x, a_date, params1) + fcast_model2(a_x, a_date, params2)
params = np.concatenate([params1, params2])
a_y2 = model_sum(a_x, a_date, params)
a_y1 == a_y2 # True
Forecast models can be added or multiplied, with the + and * operators.
Multiple levels of composition are supported::
model = (model1 + model2) * model3
Model composition is used to aggregate trend and seasonality model
components, among other uses.
Model functions have the following signature:
- f(a_x, a_date, params, is_mult)
- a_x : array of floats
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- params: array of floats - model parameters - the optimisation loop
updates this to fit our actual values. Each
model function uses a fixed number of parameters.
- is_mult: boolean. True if the model is being used with multiplicative
composition. Required because
some model functions (e.g. steps) have different behaviour
when added to other models than when multiplying them.
- returns an array of floats - with same length as a_x - output of the
model defined by this object's modelling function f_model and the
current set of parameters
By default, model parameters are initialized as random values between
0 and 1. It is possible to define a parameter initialization function
that picks initial values based on the original time series.
This is passed during ForecastModel creation with the argument
f_init_params. Parameter initialization is compatible with model
composition: the initialization function of each component will be used
for that component's parameters.
Parameter initialisation functions have the following signature:
- f_init_params(a_x, a_y, is_mult)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- returns an array of floats - with length equal to this object's n_params
value
By default, model parameters have no boundaries. However, it is possible
to define a boundary function for a model, that sets boundaries for each
model parameter, based on the input time series. This is passed during
ForecastModel creation with the argument f_bounds.
Boundary definition is compatible with model composition:
the boundary function of each component will be used for that component's
parameters.
Boundary functions have the following signature:
- f_bounds(a_x, a_y, a_date)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- returns a tuple of 2 arrays of floats. The first defines minimum
parameter boundaries, and the second the maximum parameter boundaries.
As an option, we can assign a list of input validation functions to a
model. These functions analyse the inputs that will be used for fitting a
model, returning True if valid, and False otherwise. The forecast logic
will skip a model from fitting if any of the validation functions for that
model returns False.
Input validation functions have the following signature:
- f_validate_input(a_x, a_y, a_date)
- See the description of model functions above for more details on these
parameters.
Our input time series should meet the following constraints:
- Minimum required samples depends on number of model parameters
- May include null values
- May include multiple values per sample
- A date array is only required if the model is date-aware
Class Usage::
model_x = ForecastModel(name, n_params, f_model, f_init_params,
l_f_validate_input)
# Get model name
model_name = model_x.name
# Get number of model parameters
n_params = model_x.n_params
# Get parameter initialisation function
f_init_params = model_x.f_init_params
# Get initial parameters
init_params = f_init_params(t_values, y_values)
# Get model fitting function
f_model = model_x.f_model
# Get model output
y = f_model(a_x, a_date, parameters)
The following pre-generated models are available. They are available as attributes from this module: # noqa
.. csv-table:: Forecast models
:header: "name", "params", "formula","notes"
:widths: 20, 10, 20, 40
"model_null",0, "y=0", "Does nothing.
Used to disable components (e.g. seasonality)"
"model_constant",1, "y=A", "Constant model"
"model_linear",2, "y=Ax + B", "Linear model"
"model_linear_nondec",2, "y=Ax + B", "Non decreasing linear model.
With boundaries to ensure model slope >=0"
"model_quasilinear",3, "y=A*(x^B) + C", "Quasilinear model"
"model_exp",2, "y=A * B^x", "Exponential model"
"model_decay",4, "Y = A * e^(B*(x-C)) + D", "Exponential decay model"
"model_step",2, "y=0 if x<A, y=B if x>=A", "Step model"
"model_two_steps",4, "see model_step", "2 step models.
Parameter initialization is aware of # of steps."
"model_sigmoid_step",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))
", "Sigmoid step model"
"model_sigmoid",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))", "
Sigmoid model"
"model_season_wday",7, "see desc.", "Weekday seasonality model.
Assigns a constant value to each weekday"
"model_season_wday",6, "see desc.", "6-param weekday seasonality model.
As above, with one constant set to 0."
"model_season_wday_2",2, "see desc.", "Weekend seasonality model.
Assigns a constant to each of weekday/weekend"
"model_season_month",12, "see desc.", "Month seasonality model.
Assigns a constant value to each month"
"model_season_fourier_yearly",10, "see desc", "Fourier
yearly seasonality model"
"""
def __init__(
self,
name,
n_params,
f_model,
f_init_params=None,
f_bounds=None,
l_f_validate_input=None,
l_cache_vars=None,
dict_f_cache=None,
):
"""
Create ForecastModel
:param name: Model name
:type name: basestring
:param n_params: Number of parameters for model function
:type n_params: int
:param f_model: Model function
:type f_model: function
:param f_init_params: Parameter initialisation function
:type f_init_params: function
:param f_bounds: Boundary function
:type f_bounds: function
"""
self.name = name
self.n_params = n_params
self.f_model = f_model
if f_init_params is not None:
self.f_init_params = f_init_params
else:
# Default initial parameters: random values between 0 and 1
self.f_init_params = _get_f_init_params_default(n_params)
if f_bounds is not None:
self.f_bounds = f_bounds
else:
self.f_bounds = _get_f_bounds_default(n_params)
if l_f_validate_input is None:
self.l_f_validate_input = [_f_validate_input_default]
else:
self.l_f_validate_input = _as_list(l_f_validate_input)
if l_cache_vars is None:
self.l_cache_vars = []
else:
self.l_cache_vars = _as_list(l_cache_vars)
if dict_f_cache is None:
self.dict_f_cache = dict()
else:
self.dict_f_cache = dict_f_cache
# TODO - REMOVE THIS - ASSUME NORMALIZED INPUT
def _get_f_init_params_validated(f_init_params):
# Adds argument validation to a parameter initialisation function
def f_init_params_validated(
a_x=None, a_y=None, a_date=None, is_mult=False):
if a_x is not None and pd.isnull(a_x).any():
raise ValueError('a_x cannot have null values')
return f_init_params(a_x, a_y, a_date, is_mult)
return f_init_params_validated
# Add logic to f_init_params that validates input
self.f_init_params = _get_f_init_params_validated(self.f_init_params)
def __call__(self, a_x, a_date, params, is_mult=False, **kwargs):
# assert len(params)==self.n_params
return self.f_model(a_x, a_date, params, is_mult, **kwargs)
def __str__(self):
return self.name
def __repr__(self):
return 'ForecastModel:{}'.format(self.name)
def __add__(self, forecast_model):
# Check for nulls
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}+{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_add_2_f_models(self, forecast_model)
f_init_params = _get_f_add_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, forecast_model):
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}*{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_mult_2_f_models(self, forecast_model)
f_init_params = _get_f_mult_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __rmul__(self, other):
return self.__mul__(other)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
def __ne__(self, other):
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def validate_input(self, a_x, a_y, a_date):
try:
l_result = [f_validate_input(a_x, a_y, a_date)
for f_validate_input in self.l_f_validate_input]
except AssertionError:
return False
return True
def init_cache(self, a_x, a_date):
dict_cache_vars = dict()
for k in self.l_cache_vars:
f = _dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
# Search vars defined in internal cache function dictionary
for k in self.dict_f_cache:
f = self.dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
return dict_cache_vars
# - Null model: 0
def _f_model_null(a_x, a_date, params, is_mult=False, **kwargs):
# This model does nothing - used to disable model components
# (e.g. seasonality) when adding/multiplying multiple functions
return float(is_mult) # Returns 1 if multiplying, 0 if adding
model_null = ForecastModel('null', 0, _f_model_null)
# - Constant model: :math:`Y = A`
def _f_model_constant(a_x, a_date, params, is_mult=False, **kwargs):
[A] = params
y = np.full(len(a_x), A)
return y
def _f_init_params_constant(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
return np.nanmean(a_y) + np.random.uniform(0, 1, 1)
model_constant = ForecastModel(
'constant',
1,
_f_model_constant,
_f_init_params_constant)
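# Illustrative sketch (editor's addition): a ForecastModel is just a callable wrapper around
# its model function, and composing with model_null is a no-op.
def _demo_model_constant():
    a_x = np.arange(5)
    y = model_constant(a_x, None, params=np.array([3.0]))    # -> array([3., 3., 3., 3., 3.])
    composed = model_constant + model_null                   # __add__ drops the null component
    return y, composed.name                                  # composed.name == 'constant'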
# - Naive model: Y = Y(x-1)
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _f_model_naive(a_x, a_date, params, is_mult=False, df_actuals=None):
if df_actuals is None:
raise ValueError('model_naive requires a df_actuals argument')
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out = (
# This is not really intended to work with multiple values per sample
df_actuals.drop_duplicates('x')
.merge(df_out_tmp, how='outer')
.sort_values('x')
)
df_out['y'] = (
df_out.y.shift(1)
.fillna(method='ffill')
.fillna(method='bfill')
)
df_out = df_out.loc[df_out.x.isin(a_x)]
# df_out = df_out_tmp.merge(df_out, how='left')
# TODO: CHECK THAT X,DATE order is preserved
# TODO: df_out = df_out.merge(df_out_tmp, how='right')
return df_out.y.values
model_naive = ForecastModel('naive', 0, _f_model_naive)
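# Illustrative sketch (editor's addition): model_naive ignores its params and simply lags the
# actuals by one sample (filling gaps), so it must be passed df_actuals explicitly.
def _demo_model_naive():
    df_actuals = pd.DataFrame({
        'date': pd.date_range('2020-01-01', periods=3, freq='D'),
        'x': np.array([0, 1, 2]),
        'y': [10., 20., 30.],
    })
    return model_naive(df_actuals.x.values, df_actuals.date, [],
                       df_actuals=df_actuals)                # -> [10., 10., 20.]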
# - Seasonal naive model
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _fillna_wday(df):
"""
In a time series, shift samples by 1 week
and fill gaps with data from same weekday
"""
def add_col_y_out(df):
df = df.assign(y_out=df.y.shift(1).fillna(method='ffill'))
return df
df_out = (
df
.assign(wday=df.date.dt.weekday)
.groupby('wday', as_index=False).apply(add_col_y_out)
.sort_values(['x'])
.reset_index(drop=True)
)
return df_out
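# Illustrative sketch (editor's addition): _fillna_wday adds a y_out column holding the
# previous week's value for the same weekday (the first week has no previous value, so
# its y_out stays NaN).
def _demo_fillna_wday():
    df = pd.DataFrame({
        'date': pd.date_range('2020-01-06', periods=14, freq='D'),   # two Monday-to-Sunday weeks
        'x': np.arange(14),
        'y': np.arange(14, dtype=float),
    })
    return _fillna_wday(df)[['date', 'wday', 'y', 'y_out']]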
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
"""Naive model - takes last valid weekly sample"""
if df_actuals is None:
raise ValueError('model_snaive_wday requires a df_actuals argument')
# df_actuals_model - table with actuals samples,
# adding y_out column with naive model values
df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
# df_last_week - table with naive model values from last actuals week,
# to use in extrapolation
df_last_week = (
df_actuals_model
# Fill null actual values with data from previous weeks
.assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
.drop_duplicates('wday', keep='last')
[['wday', 'y']]
.rename(columns=dict(y='y_out'))
)
# Generate table with extrapolated samples
    df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
import gensim
import fasttext
import pandas as pd
import os
def generate_w2v(corpus,emb_path,tokenizer=None,min_count=2,size=200,epochs=5,window=3,sg=1,hs=1,negative=5,max_vocab_size=10000,sorted_vocab=1,seed=42):
'''
tokenizer supports NLTK tokenizer, spacy tokenizer, huggingface tokenizer
NLTK -> use nltk.tokenize.WordPunctTokenizer().tokenize
Spacy -> spacy_tokenizer('en_core_web_sm')
Huggingface -> transformers.BertTokenizer.from_pretrained('bert-base-uncased').tokenize
'''
try:
os.makedirs(emb_path)
except OSError:
pass
if type(corpus[0]) == str:
if tokenizer:
corpus = [tokenizer(i) for i in corpus]
else:
corpus = [i.split() for i in corpus]
model = gensim.models.word2vec.Word2Vec(min_count=min_count,size=size,window=window,sg=sg,hs=hs,negative=negative, \
max_vocab_size=max_vocab_size,sorted_vocab=sorted_vocab,seed=seed)
model.build_vocab(corpus)
model.train(corpus,total_examples=model.corpus_count,epochs=epochs)
model.wv.save_word2vec_format(os.path.join(emb_path,"w2v.txt"),binary=False)
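# Illustrative sketch (editor's addition): the corpus can be raw strings (whitespace-split by
# default) or pre-tokenised lists of tokens; vectors are written to <emb_path>/w2v.txt.
# Assumes the gensim version this module targets (Word2Vec still accepting a `size` argument).
def _demo_generate_w2v():
    corpus = ["the cat sat on the mat", "the dog sat on the rug"] * 50
    generate_w2v(corpus, emb_path="./embeddings", min_count=1, size=50, epochs=2)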
def generate_fasttext(corpus,text_filepath,emb_path,cbow=False,min_count=2,minn=3, maxn=5, dim=200,epochs=5,lr=.1,neg=5,ws=5):
try:
os.makedirs(emb_path)
except OSError:
pass
try:
os.makedirs(text_filepath)
except OSError:
pass
if type(corpus[0]) == list:
corpus = [" ".join(i) for i in corpus]
    df = pd.DataFrame()
import pandas as pd
import pytest
from woodwork.logical_types import Datetime, Double, Integer, NaturalLanguage
from featuretools.entityset import EntitySet
from featuretools.tests.testing_utils import get_df_tags
from featuretools.utils.gen_utils import Library, import_or_none
from featuretools.utils.koalas_utils import pd_to_ks_clean
ks = import_or_none('databricks.koalas')
@pytest.mark.skipif('not ks')
def test_add_dataframe_from_ks_df(pd_es):
cleaned_df = pd_to_ks_clean(pd_es["log"])
log_ks = ks.from_pandas(cleaned_df)
ks_es = EntitySet(id="ks_es")
ks_es = ks_es.add_dataframe(
dataframe_name="log_ks",
dataframe=log_ks,
index="id",
time_index="datetime",
logical_types=pd_es["log"].ww.logical_types,
semantic_tags=get_df_tags(pd_es["log"])
)
pd.testing.assert_frame_equal(cleaned_df, ks_es["log_ks"].to_pandas(), check_like=True)
@pytest.mark.skipif('not ks')
def test_add_dataframe_with_non_numeric_index(pd_es, ks_es):
    df = pd.DataFrame({"id": pd.Series(["A_1", "A_2", "C", "D"], dtype='string')
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
__copyright__ = "Copyright (c) 2017, Intel Research and Development Ireland Ltd."
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pandas
from analytics_engine.heuristics.beans.infograph import InfoGraphNode
from analytics_engine import common
LOG = common.LOG
class SnapUtils(object):
@staticmethod
def annotate_machine_pu_util(internal_graph, node):
source = InfoGraphNode.get_machine_name_of_pu(node)
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_compute_utilization(machine)
if 'intel/use/compute/utilization' not in machine_util.columns:
sum_util = None
cpu_metric = 'intel/procfs/cpu/utilization_percentage'
pu_util_df = InfoGraphNode.get_compute_utilization(node)
if cpu_metric in pu_util_df.columns:
pu_util = pu_util_df[cpu_metric]
pu_util = pu_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = pu_util
InfoGraphNode.set_compute_utilization(machine, machine_util)
else:
                LOG.info('CPU utilization metric not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def annotate_machine_disk_util(internal_graph, node):
source = InfoGraphNode.get_attributes(node)['allocation']
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_disk_utilization(machine)
if 'intel/use/disk/utilization' not in machine_util.columns:
disk_metric = 'intel/procfs/disk/utilization_percentage'
disk_util_df = InfoGraphNode.get_disk_utilization(node)
if disk_metric in disk_util_df.columns:
disk_util = disk_util_df[disk_metric]
disk_util = disk_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = disk_util
InfoGraphNode.set_disk_utilization(machine, machine_util)
else:
                LOG.info('Disk utilization metric not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use disk for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def annotate_machine_network_util(internal_graph, node):
source = InfoGraphNode.get_attributes(node)['allocation']
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_network_utilization(machine)
if 'intel/use/network/utilization' not in machine_util.columns:
net_metric = 'intel/psutil/net/utilization_percentage'
net_util_df = InfoGraphNode.get_network_utilization(node)
if net_metric in net_util_df.columns:
net_util = net_util_df[net_metric]
net_util = net_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = net_util
InfoGraphNode.set_network_utilization(machine, machine_util)
else:
                LOG.info('Network utilization metric not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use network for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def utilization(internal_graph, node, telemetry):
# machine usage
telemetry_data = telemetry.get_data(node)
if 'intel/use/compute/utilization' in telemetry_data:
InfoGraphNode.set_compute_utilization(node,
pandas.DataFrame(telemetry_data['intel/use/compute/utilization'],
columns=['intel/use/compute/utilization']))
# pu usage
if 'intel/procfs/cpu/utilization_percentage' in telemetry_data:
InfoGraphNode.set_compute_utilization(node,
pandas.DataFrame(
telemetry_data['intel/procfs/cpu/utilization_percentage'],
columns=['intel/procfs/cpu/utilization_percentage']))
if 'intel/use/memory/utilization' in telemetry_data:
InfoGraphNode.set_memory_utilization(node, pandas.DataFrame(telemetry_data['intel/use/memory/utilization']))
if 'intel/use/disk/utilization' in telemetry_data:
InfoGraphNode.set_disk_utilization(node, pandas.DataFrame(telemetry_data['intel/use/disk/utilization']))
if 'intel/use/network/utilization' in telemetry_data:
InfoGraphNode.set_network_utilization(node,
pandas.DataFrame(telemetry_data['intel/use/network/utilization']))
# supporting not available /use/ metrics
if 'intel/procfs/meminfo/mem_total' in telemetry_data and 'intel/procfs/meminfo/mem_used' in telemetry_data:
# LOG.info('Found memory utilization procfs')
mem_used = telemetry_data['intel/procfs/meminfo/mem_used'].fillna(0)
mem_total = telemetry_data['intel/procfs/meminfo/mem_total'].fillna(0)
mem_util = mem_used * 100 / mem_total
mem_util.name = 'intel/procfs/memory/utilization_percentage'
InfoGraphNode.set_memory_utilization(node, pandas.DataFrame(mem_util))
if 'intel/procfs/disk/io_time' in telemetry_data:
io_time = telemetry_data['intel/procfs/disk/io_time'].fillna(0)
disk_util = io_time*100/1000
disk_util.name = 'intel/procfs/disk/utilization_percentage'
InfoGraphNode.set_disk_utilization(node, pandas.DataFrame(disk_util))
if 'intel/psutil/net/bytes_recv' in telemetry_data and 'intel/psutil/net/bytes_sent' in telemetry_data:
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
nic_speed = InfoGraphNode.get_nic_speed_mbps(machine) * 1000000
net_data = telemetry_data.filter(['timestamp', 'intel/psutil/net/bytes_recv','intel/psutil/net/bytes_sent'], axis=1)
net_data.fillna(0)
net_data['intel/psutil/net/bytes_total'] = net_data['intel/psutil/net/bytes_recv']+net_data['intel/psutil/net/bytes_sent']
net_data_interval = net_data.set_index('timestamp').diff()
net_data_interval['intel/psutil/net/utilization_percentage'] = net_data_interval['intel/psutil/net/bytes_total'] * 100 /nic_speed
net_data_pct = pandas.DataFrame(net_data_interval['intel/psutil/net/utilization_percentage'])
InfoGraphNode.set_network_utilization(node, net_data_pct)
elif 'intel/procfs/iface/bytes_recv' in telemetry_data and 'intel/procfs/iface/bytes_recv' in telemetry_data:
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
nic_speed = InfoGraphNode.get_nic_speed_mbps(machine) * 1000000
net_data = telemetry_data.filter(['timestamp', 'intel/procfs/iface/bytes_recv','intel/procfs/iface/bytes_sent'], axis=1)
net_data.fillna(0)
net_data['intel/psutil/net/bytes_total'] = net_data['intel/procfs/iface/bytes_recv']+net_data['intel/procfs/iface/bytes_sent']
net_data_interval = net_data.set_index('timestamp').diff()
net_data_interval['intel/psutil/net/utilization_percentage'] = net_data_interval['intel/psutil/net/bytes_total'] * 100 /nic_speed
net_data_pct = pandas.DataFrame(net_data_interval['intel/psutil/net/utilization_percentage'])
InfoGraphNode.set_network_utilization(node, net_data_pct)
if 'intel/docker/stats/cgroups/cpu_stats/cpu_usage/total' in telemetry_data:
# Container node
#cpu util
cpu_data = telemetry_data.filter(['timestamp', 'intel/docker/stats/cgroups/cpu_stats/cpu_usage/total'], axis=1)
cpu_data_interval = cpu_data.set_index('timestamp').diff()
#util data in nanoseconds
cpu_data_interval['intel/docker/stats/cgroups/cpu_stats/cpu_usage/percentage'] = cpu_data_interval['intel/docker/stats/cgroups/cpu_stats/cpu_usage/total'] / 10000000
cpu_data_pct = pandas.DataFrame(cpu_data_interval['intel/docker/stats/cgroups/cpu_stats/cpu_usage/percentage'])
InfoGraphNode.set_compute_utilization(node, cpu_data_pct)
if "intel/docker/stats/cgroups/memory_stats/usage/usage" in telemetry_data:
#container mem util
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
local_mem = int(InfoGraphNode.get_attributes(machine).get("local_memory"))
mem_data = telemetry_data.filter(['timestamp', "intel/docker/stats/cgroups/memory_stats/usage/usage"], axis=1)
mem_data["intel/docker/stats/cgroups/memory_stats/usage/percentage"] = mem_data["intel/docker/stats/cgroups/memory_stats/usage/usage"]/local_mem * 100
mem_data_pct = pandas.DataFrame(mem_data["intel/docker/stats/cgroups/memory_stats/usage/percentage"])
InfoGraphNode.set_memory_utilization(node, mem_data_pct)
if "intel/docker/stats/network/tx_bytes" in telemetry_data:
#container network util
source=telemetry._source(node)
machine = InfoGraphNode.get_node(internal_graph, source)
nic_speed = InfoGraphNode.get_nic_speed_mbps(machine) * 1000000
net_data = telemetry_data.filter(['timestamp', "intel/docker/stats/network/tx_bytes","intel/docker/stats/network/rx_bytes"], axis=1)
net_data.fillna(0)
net_data['intel/docker/stats/network/bytes_total'] = net_data["intel/docker/stats/network/tx_bytes"]+net_data["intel/docker/stats/network/rx_bytes"]
net_data_interval = net_data.set_index('timestamp').diff()
net_data_interval['intel/docker/stats/network/utilization_percentage'] = net_data_interval['intel/docker/stats/network/bytes_total'] * 100 /nic_speed
            net_data_pct = pandas.DataFrame(net_data_interval['intel/docker/stats/network/utilization_percentage'])
            InfoGraphNode.set_network_utilization(node, net_data_pct)
from __future__ import division
import unittest
import json
import datetime
import pandas as pd
import great_expectations as ge
from .test_utils import assertDeepAlmostEqual
class TestPandasDataset(unittest.TestCase):
def run_encapsulated_test(self, expectation_name, filename):
with open(filename) as f:
T = json.load(f)
D = ge.dataset.PandasDataset(T["dataset"])
D.set_default_expectation_argument("output_format", "COMPLETE")
self.maxDiff = None
for t in T["tests"]:
if "title" in t:
print(t["title"])
else:
print("WARNING: test set has no `title` field. In future versions of Great Expectations, this will be required.")
expectation = getattr(D, expectation_name)
out = expectation(**t['in'])
out = json.loads(json.dumps(out))
self.assertEqual(out, t['out'])
# def test_expect_column_values_to_be_between(self):
# """
# """
# with open("./tests/test_sets/expect_column_values_to_be_between_test_set.json") as f:
# fixture = json.load(f)
# dataset = fixture["dataset"]
# tests = fixture["tests"]
# D = ge.dataset.PandasDataset(dataset)
# D.set_default_expectation_argument("result_format", "COMPLETE")
# self.maxDiff = None
# for t in tests:
# out = D.expect_column_values_to_be_between(**t['in'])
# # print '-'*80
# print(t)
# # print(json.dumps(out, indent=2))
# if 'out' in t:
# self.assertEqual(t['out']['success'], out['success'])
# if 'unexpected_index_list' in t['out']:
# self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
# if 'unexpected_list' in t['out']:
# self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
# if 'error' in t:
# self.assertEqual(out['exception_info']['raised_exception'], True)
# self.assertIn(t['error']['traceback_substring'], out['exception_info']['exception_traceback'])
# def test_expect_column_values_to_match_regex_list(self):
# with open("./tests/test_sets/expect_column_values_to_match_regex_list_test_set.json") as f:
# J = json.load(f)
# D = ge.dataset.PandasDataset(J["dataset"])
# D.set_default_expectation_argument("result_format", "COMPLETE")
# T = J["tests"]
# self.maxDiff = None
# for t in T:
# out = D.expect_column_values_to_match_regex_list(**t['in'])
# self.assertEqual(t['out']['success'], out['success'])
# if 'unexpected_index_list' in t['out']:
# self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
# if 'unexpected_list' in t['out']:
# self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
def test_expect_column_values_to_match_strftime_format(self):
"""
"""
D = ge.dataset.PandasDataset({
'x' : [1,2,4],
'us_dates' : ['4/30/2017','4/30/2017','7/4/1776'],
'us_dates_type_error' : ['4/30/2017','4/30/2017', 5],
'almost_iso8601' : ['1977-05-25T00:00:00', '1980-05-21T13:47:59', '2017-06-12T23:57:59'],
'almost_iso8601_val_error' : ['1977-05-55T00:00:00', '1980-05-21T13:47:59', '2017-06-12T23:57:59'],
'already_datetime' : [datetime.datetime(2015,1,1), datetime.datetime(2016,1,1), datetime.datetime(2017,1,1)]
})
D.set_default_expectation_argument("result_format", "COMPLETE")
T = [
{
'in':{'column':'us_dates', 'strftime_format':'%m/%d/%Y'},
'out':{'success':True, 'unexpected_index_list':[], 'unexpected_list':[]}
},
{
'in':{'column':'us_dates_type_error','strftime_format':'%m/%d/%Y', 'mostly': 0.5, 'catch_exceptions': True},
# 'out':{'success':True, 'unexpected_index_list':[2], 'unexpected_list':[5]}},
'error':{
'traceback_substring' : 'TypeError'
},
},
{
'in':{'column':'us_dates_type_error','strftime_format':'%m/%d/%Y', 'catch_exceptions': True},
'error':{
'traceback_substring' : 'TypeError'
}
},
{
'in':{'column':'almost_iso8601','strftime_format':'%Y-%m-%dT%H:%M:%S'},
'out':{'success':True,'unexpected_index_list':[], 'unexpected_list':[]}},
{
'in':{'column':'almost_iso8601_val_error','strftime_format':'%Y-%m-%dT%H:%M:%S'},
'out':{'success':False,'unexpected_index_list':[0], 'unexpected_list':['1977-05-55T00:00:00']}},
{
'in':{'column':'already_datetime','strftime_format':'%Y-%m-%d', 'catch_exceptions':True},
# 'out':{'success':False,'unexpected_index_list':[0], 'unexpected_list':['1977-05-55T00:00:00']},
'error':{
'traceback_substring' : 'TypeError: Values passed to expect_column_values_to_match_strftime_format must be of type string.'
},
}
]
for t in T:
out = D.expect_column_values_to_match_strftime_format(**t['in'])
if 'out' in t:
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
elif 'error' in t:
self.assertEqual(out['exception_info']['raised_exception'], True)
self.assertIn(t['error']['traceback_substring'], out['exception_info']['exception_traceback'])
def test_expect_column_values_to_be_dateutil_parseable(self):
D = ge.dataset.PandasDataset({
'c1':['03/06/09','23 April 1973','January 9, 2016'],
'c2':['9/8/2012','covfefe',25],
'c3':['Jared','June 1, 2013','July 18, 1976'],
'c4':['1', '2', '49000004632'],
'already_datetime' : [datetime.datetime(2015,1,1), datetime.datetime(2016,1,1), datetime.datetime(2017,1,1)],
})
D.set_default_expectation_argument("result_format", "COMPLETE")
T = [
{
'in':{'column': 'c1'},
'out':{'success':True, 'unexpected_list':[], 'unexpected_index_list': []}},
{
'in':{"column":'c2', "catch_exceptions":True},
# 'out':{'success':False, 'unexpected_list':['covfefe', 25], 'unexpected_index_list': [1, 2]}},
'error':{ 'traceback_substring' : 'TypeError: Values passed to expect_column_values_to_be_dateutil_parseable must be of type string' },
},
{
'in':{"column":'c3'},
'out':{'success':False, 'unexpected_list':['Jared'], 'unexpected_index_list': [0]}},
{
'in':{'column': 'c3', 'mostly':.5},
'out':{'success':True, 'unexpected_list':['Jared'], 'unexpected_index_list': [0]}
},
{
'in':{'column': 'c4'},
'out':{'success':False, 'unexpected_list':['49000004632'], 'unexpected_index_list': [2]}
},
{
'in':{'column':'already_datetime', 'catch_exceptions':True},
'error':{ 'traceback_substring' : 'TypeError: Values passed to expect_column_values_to_be_dateutil_parseable must be of type string' },
}
]
for t in T:
out = D.expect_column_values_to_be_dateutil_parseable(**t['in'])
if 'out' in t:
self.assertEqual(t['out']['success'], out['success'])
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
elif 'error' in t:
self.assertEqual(out['exception_info']['raised_exception'], True)
self.assertIn(t['error']['traceback_substring'], out['exception_info']['exception_traceback'])
def test_expect_column_values_to_be_json_parseable(self):
d1 = json.dumps({'i':[1,2,3],'j':35,'k':{'x':'five','y':5,'z':'101'}})
d2 = json.dumps({'i':1,'j':2,'k':[3,4,5]})
d3 = json.dumps({'i':'a', 'j':'b', 'k':'c'})
d4 = json.dumps({'i':[4,5], 'j':[6,7], 'k':[8,9], 'l':{4:'x', 5:'y', 6:'z'}})
D = ge.dataset.PandasDataset({
'json_col':[d1,d2,d3,d4],
'not_json':[4,5,6,7],
'py_dict':[{'a':1, 'out':1},{'b':2, 'out':4},{'c':3, 'out':9},{'d':4, 'out':16}],
'most':[d1,d2,d3,'d4']
})
D.set_default_expectation_argument("result_format", "COMPLETE")
T = [
{
'in':{'column':'json_col'},
'out':{'success':True, 'unexpected_index_list':[], 'unexpected_list':[]}},
{
'in':{'column':'not_json'},
'out':{'success':False, 'unexpected_index_list':[0,1,2,3], 'unexpected_list':[4,5,6,7]}},
{
'in':{'column':'py_dict'},
'out':{'success':False, 'unexpected_index_list':[0,1,2,3], 'unexpected_list':[{'a':1, 'out':1},{'b':2, 'out':4},{'c':3, 'out':9},{'d':4, 'out':16}]}},
{
'in':{'column':'most'},
'out':{'success':False, 'unexpected_index_list':[3], 'unexpected_list':['d4']}},
{
'in':{'column':'most', 'mostly':.75},
'out':{'success':True, 'unexpected_index_list':[3], 'unexpected_list':['d4']}}
]
for t in T:
out = D.expect_column_values_to_be_json_parseable(**t['in'])
self.assertEqual(t['out']['success'], out['success'])
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
# def test_expect_column_values_to_match_json_schema(self):
# with open("./tests/test_sets/expect_column_values_to_match_json_schema_test_set.json") as f:
# J = json.load(f)
# D = ge.dataset.PandasDataset(J["dataset"])
# D.set_default_expectation_argument("result_format", "COMPLETE")
# T = J["tests"]
# self.maxDiff = None
# for t in T:
# out = D.expect_column_values_to_match_json_schema(**t['in'])#, **t['kwargs'])
# self.assertEqual(t['out']['success'], out['success'])
# if 'unexpected_index_list' in t['out']:
# self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
# if 'unexpected_list' in t['out']:
# self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
def test_expectation_decorator_summary_mode(self):
df = ge.dataset.PandasDataset({
'x' : [1,2,3,4,5,6,7,7,None,None],
})
df.set_default_expectation_argument("result_format", "COMPLETE")
# print '&'*80
# print json.dumps(df.expect_column_values_to_be_between('x', min_value=1, max_value=5, result_format="SUMMARY"), indent=2)
self.maxDiff = None
self.assertEqual(
df.expect_column_values_to_be_between('x', min_value=1, max_value=5, result_format="SUMMARY"),
{
"success" : False,
"result" : {
"element_count" : 10,
"missing_count" : 2,
"missing_percent" : .2,
"unexpected_count" : 3,
"partial_unexpected_counts": [
{"value": 7.0,
"count": 2},
{"value": 6.0,
"count": 1}
],
"unexpected_percent": 0.3,
"unexpected_percent_nonmissing": 0.375,
"partial_unexpected_list" : [6.0,7.0,7.0],
"partial_unexpected_index_list": [5,6,7],
}
}
)
self.assertEqual(
df.expect_column_mean_to_be_between("x", 3, 7, result_format="SUMMARY"),
{
'success': True,
'result': {
'observed_value': 4.375,
'element_count': 10,
'missing_count': 2,
'missing_percent': .2
},
}
)
def test_positional_arguments(self):
df = ge.dataset.PandasDataset({
'x':[1,3,5,7,9],
'y':[2,4,6,8,10],
'z':[None,'a','b','c','abc']
})
df.set_default_expectation_argument('result_format', 'COMPLETE')
self.assertEqual(
df.expect_column_mean_to_be_between('x',4,6),
{'success':True, 'result': {'observed_value': 5, 'element_count': 5,
'missing_count': 0,
'missing_percent': 0.0}}
)
out = df.expect_column_values_to_be_between('y',1,6)
t = {'out': {'success':False, 'unexpected_list':[8,10], 'unexpected_index_list': [3,4]}}
if 'out' in t:
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
out = df.expect_column_values_to_be_between('y',1,6,mostly=.5)
t = {'out': {'success':True, 'unexpected_list':[8,10], 'unexpected_index_list':[3,4]}}
if 'out' in t:
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
out = df.expect_column_values_to_be_in_set('z',['a','b','c'])
t = {'out': {'success':False, 'unexpected_list':['abc'], 'unexpected_index_list':[4]}}
if 'out' in t:
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
out = df.expect_column_values_to_be_in_set('z',['a','b','c'],mostly=.5)
t = {'out': {'success':True, 'unexpected_list':['abc'], 'unexpected_index_list':[4]}}
if 'out' in t:
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
def test_result_format_argument_in_decorators(self):
df = ge.dataset.PandasDataset({
'x':[1,3,5,7,9],
'y':[2,4,6,8,10],
'z':[None,'a','b','c','abc']
})
df.set_default_expectation_argument('result_format', 'COMPLETE')
#Test explicit Nones in result_format
self.assertEqual(
df.expect_column_mean_to_be_between('x',4,6, result_format=None),
{'success':True, 'result': {'observed_value': 5, 'element_count': 5,
'missing_count': 0,
'missing_percent': 0.0
}}
)
self.assertEqual(
df.expect_column_values_to_be_between('y',1,6, result_format=None),
{'result': {'element_count': 5,
'missing_count': 0,
'missing_percent': 0.0,
'partial_unexpected_counts': [{'count': 1, 'value': 8},
{'count': 1, 'value': 10}],
'partial_unexpected_index_list': [3, 4],
'partial_unexpected_list': [8, 10],
'unexpected_count': 2,
'unexpected_index_list': [3, 4],
'unexpected_list': [8, 10],
'unexpected_percent': 0.4,
'unexpected_percent_nonmissing': 0.4},
'success': False}
)
#Test unknown output format
with self.assertRaises(ValueError):
df.expect_column_values_to_be_between('y',1,6, result_format="QUACK")
with self.assertRaises(ValueError):
df.expect_column_mean_to_be_between('x',4,6, result_format="QUACK")
def test_from_pandas(self):
pd_df = pd.DataFrame({
'x':[1,3,5,7,9],
'y':[2,4,6,8,10],
'z':[None,'a','b','c','abc']
})
ge_df = ge.from_pandas(pd_df)
self.assertIsInstance(ge_df, ge.dataset.Dataset)
self.assertEqual(list(ge_df.columns), ['x', 'y', 'z'])
self.assertEqual(list(ge_df['x']), list(pd_df['x']))
self.assertEqual(list(ge_df['y']), list(pd_df['y']))
self.assertEqual(list(ge_df['z']), list(pd_df['z']))
def test_from_pandas_expectations_config(self):
# Logic mostly copied from TestValidation.test_validate
def load_ge_config(file):
with open(file) as f:
return json.load(f)
my_expectations_config = load_ge_config("./tests/test_sets/titanic_expectations.json")
pd_df = pd.read_csv("./tests/test_sets/Titanic.csv")
my_df = ge.from_pandas(pd_df, expectations_config=my_expectations_config)
my_df.set_default_expectation_argument("result_format", "COMPLETE")
results = my_df.validate(catch_exceptions=False)
expected_results = load_ge_config("./tests/test_sets/expected_results_20180303.json")
self.maxDiff = None
assertDeepAlmostEqual(self, results, expected_results)
def test_ge_pandas_concatenating(self):
df1 = ge.dataset.PandasDataset({
'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']
})
df1.expect_column_values_to_match_regex('A', '^A[0-2]$')
df1.expect_column_values_to_match_regex('B', '^B[0-2]$')
df2 = ge.dataset.PandasDataset({
'A': ['A3', 'A4', 'A5'],
'B': ['B3', 'B4', 'B5']
})
df2.expect_column_values_to_match_regex('A', '^A[3-5]$')
df2.expect_column_values_to_match_regex('B', '^B[3-5]$')
df = pd.concat([df1, df2])
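# Illustrative sketch (not part of the original suite; the test above is cut off
# here). At the pandas level the concatenation behaves as below; whether the
# expectations declared on df1/df2 carry over to the combined object is what the
# truncated test probes, and no claim is made here about the Great Expectations
# API itself.
def _example_plain_pandas_concat():
    df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']})
    df2 = pd.DataFrame({'A': ['A3', 'A4', 'A5'], 'B': ['B3', 'B4', 'B5']})
    combined = pd.concat([df1, df2], ignore_index=True)
    assert list(combined['A']) == ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']
    return combined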
import itertools
from numpy import nan
import numpy as np
from pandas.core.index import Index, _ensure_index
import pandas.core.common as com
import pandas._tseries as lib
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas data
structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
def __init__(self, values, items, ref_items, ndim=2,
do_integrity_check=False):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
assert(values.ndim == ndim)
assert(len(items) == len(values))
self.values = values
self.ndim = ndim
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
if do_integrity_check:
self._check_integrity()
def _check_integrity(self):
if len(self.items) < 2:
return
# monotonicity
return (self.ref_locs[1:] > self.ref_locs[:-1]).all()
_ref_locs = None
@property
def ref_locs(self):
if self._ref_locs is None:
indexer = self.ref_items.get_indexer(self.items)
assert((indexer != -1).all())
self._ref_locs = indexer
return self._ref_locs
def set_ref_items(self, ref_items, maybe_rename=True):
"""
If maybe_rename=True, re-derive this block's items from the new ref_items
"""
assert(isinstance(ref_items, Index))
if maybe_rename:
self.items = ref_items.take(self.ref_locs)
self.ref_items = ref_items
def __repr__(self):
shape = ' x '.join([str(s) for s in self.shape])
name = type(self).__name__
return '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)
def __contains__(self, item):
return item in self.items
def __len__(self):
return len(self.values)
def __getstate__(self):
# should not pickle generally (want to share ref_items), but here for
# completeness
return (self.items, self.ref_items, self.values)
def __setstate__(self, state):
items, ref_items, values = state
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
self.values = values
self.ndim = values.ndim
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, self.items, self.ref_items)
def merge(self, other):
assert(self.ref_items.equals(other.ref_items))
# Not sure whether to allow this or not
# if not union_ref.equals(other.ref_items):
# union_ref = self.ref_items + other.ref_items
return _merge_blocks([self, other], self.ref_items)
def reindex_axis(self, indexer, mask, needs_masking, axis=0):
"""
Reindex using pre-computed indexer information
"""
if self.values.size > 0:
new_values = com.take_fast(self.values, indexer, mask,
needs_masking, axis=axis)
else:
shape = list(self.shape)
shape[axis] = len(indexer)
new_values = np.empty(shape)
new_values.fill(np.nan)
return make_block(new_values, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
"""
Reindex to only those items contained in the input set of items
E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
then the resulting items will be ['b']
Returns
-------
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
else:
mask = indexer != -1
masked_idx = indexer[mask]
if self.values.ndim == 2:
new_values = com.take_2d(self.values, masked_idx, axis=0,
needs_masking=False)
else:
new_values = self.values.take(masked_idx, axis=0)
new_items = self.items.take(masked_idx)
return make_block(new_values, new_items, new_ref_items)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
self.values[loc] = value
def delete(self, item):
"""
Returns
-------
y : Block (new object)
"""
loc = self.items.get_loc(item)
new_items = self.items.delete(loc)
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items)
def split_block_at(self, item):
"""
Split block around given column, for "deleting" a column without
having to copy data by returning views on the original array
Returns
-------
leftb, rightb : (Block or None, Block or None)
"""
loc = self.items.get_loc(item)
if len(self.items) == 1:
# no blocks left
return None, None
if loc == 0:
# at front
left_block = None
right_block = make_block(self.values[1:], self.items[1:].copy(),
self.ref_items)
elif loc == len(self.values) - 1:
# at back
left_block = make_block(self.values[:-1], self.items[:-1].copy(),
self.ref_items)
right_block = None
else:
# in the middle
left_block = make_block(self.values[:loc],
self.items[:loc].copy(), self.ref_items)
right_block = make_block(self.values[loc + 1:],
self.items[loc + 1:].copy(), self.ref_items)
return left_block, right_block
def fillna(self, value):
new_values = self.values.copy()
mask = com.isnull(new_values.ravel())
new_values.flat[mask] = value
return make_block(new_values, self.items, self.ref_items)
#-------------------------------------------------------------------------------
# Is this even possible?
class FloatBlock(Block):
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating)
class IntBlock(Block):
def should_store(self, value):
return issubclass(value.dtype.type, np.integer)
class BoolBlock(Block):
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
def should_store(self, value):
return not issubclass(value.dtype.type,
(np.integer, np.floating, np.bool_))
def make_block(values, items, ref_items, do_integrity_check=False):
dtype = values.dtype
vtype = dtype.type
if issubclass(vtype, np.floating):
klass = FloatBlock
elif issubclass(vtype, np.integer):
if vtype != np.int64:
values = values.astype('i8')
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
else:
klass = ObjectBlock
return klass(values, items, ref_items, ndim=values.ndim,
do_integrity_check=do_integrity_check)
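# Illustrative sketch (assumes it is run inside this module, where numpy and the
# Block subclasses are in scope): make_block() inspects the dtype of `values`
# and dispatches to FloatBlock, IntBlock, BoolBlock or ObjectBlock; non-int64
# integer data is upcast to int64 on the way in.
def _example_make_block_dispatch():
    floats = make_block(np.array([[1.0, 2.0], [3.0, 4.0]]), ['a', 'b'], ['a', 'b'])
    ints = make_block(np.array([[1, 2]], dtype='int32'), ['c'], ['c'])
    flags = make_block(np.array([[True, False]]), ['d'], ['d'])
    assert isinstance(floats, FloatBlock)
    assert isinstance(ints, IntBlock) and ints.values.dtype == np.int64
    assert isinstance(flags, BoolBlock)
    return floats, ints, flags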
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', 'ndim']
def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = blocks
ndim = len(axes)
for block in blocks:
assert(ndim == block.values.ndim)
if do_integrity_check:
self._verify_integrity()
def __nonzero__(self):
return True
@property
def ndim(self):
return len(self.axes)
def is_mixed_dtype(self):
counts = set()
for block in self.blocks:
counts.add(block.dtype)
if len(counts) > 1:
return True
return False
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
if len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
self.axes[axis] = _ensure_index(value)
if axis == 0:
for block in self.blocks:
block.set_ref_items(self.items, maybe_rename=True)
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def set_items_norename(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_rename=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [b.items for b in self.blocks]
axes_array = [ax for ax in self.axes]
return axes_array, block_values, block_items
def __setstate__(self, state):
# discard anything after 3rd, support beta pickling format for a little
# while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
blocks = []
for values, items in zip(bvalues, bitems):
blk = make_block(values, items, self.axes[0],
do_integrity_check=True)
blocks.append(blk)
self.blocks = blocks
def __len__(self):
return len(self.items)
def __repr__(self):
output = 'BlockManager'
for i, ax in enumerate(self.axes):
if i == 0:
output += '\nItems: %s' % ax
else:
output += '\nAxis %d: %s' % (i, ax)
for block in self.blocks:
output += '\n%s' % repr(block)
return output
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
def _verify_integrity(self):
_union_block_items(self.blocks)
mgr_shape = self.shape
for block in self.blocks:
assert(block.values.shape[1:] == mgr_shape[1:])
tot_items = sum(len(x.items) for x in self.blocks)
assert(len(self.items) == tot_items)
def astype(self, dtype):
new_blocks = []
for block in self.blocks:
newb = make_block(block.values.astype(dtype), block.items,
block.ref_items)
new_blocks.append(newb)
new_mgr = BlockManager(new_blocks, self.axes)
return new_mgr.consolidate()
def is_consolidated(self):
"""
Return True if each dtype appears in at most one block, i.e. the manager is fully consolidated
"""
dtypes = [blk.dtype for blk in self.blocks]
return len(dtypes) == len(set(dtypes))
def get_slice(self, slobj, axis=0):
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
if axis == 0:
new_items = new_axes[0]
if len(self.blocks) == 1:
blk = self.blocks[0]
newb = make_block(blk.values[slobj], new_items,
new_items)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
else:
new_blocks = self._slice_blocks(slobj, axis)
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _slice_blocks(self, slobj, axis):
new_blocks = []
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = slobj
slicer = tuple(slicer)
for block in self.blocks:
newb = make_block(block.values[slicer], block.items,
block.ref_items)
new_blocks.append(newb)
return new_blocks
def get_series_dict(self):
# For DataFrame
return _blocks_to_series_dict(self.blocks, self.axes[1])
@classmethod
def from_blocks(cls, blocks, index):
# also checks for overlap
items = _union_block_items(blocks)
return BlockManager(blocks, [items, index])
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shallow copy (do not copy data)
Returns
-------
copy : BlockManager
"""
copy_blocks = [block.copy(deep=deep) for block in self.blocks]
# copy_axes = [ax.copy() for ax in self.axes]
copy_axes = list(self.axes)
return BlockManager(copy_blocks, copy_axes, do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
mat = np.empty(self.shape, dtype=float)
elif len(self.blocks) == 1:
blk = self.blocks[0]
if items is None or blk.items.equals(items):
# if not, then just call interleave per below
mat = blk.values
else:
mat = self.reindex_items(items).as_matrix()
else:
if items is None:
mat = self._interleave(self.items)
else:
mat = self.reindex_items(items).as_matrix()
return mat
def _interleave(self, items):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
items = _ensure_index(items)
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(len(items), dtype=bool)
# By construction, all of the items should be covered by one of the
# blocks
for block in self.blocks:
indexer = items.get_indexer(block.items)
assert((indexer != -1).all())
result[indexer] = block.values
itemmask[indexer] = 1
assert(itemmask.all())
return result
def xs(self, key, axis=1, copy=True):
assert(axis >= 1)
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
elif len(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
def fast_2d_xs(self, loc, copy=False):
"""
"""
if len(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if copy:
result = result.copy()
return result
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
dtype = _interleaved_dtype(self.blocks)
items = self.items
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
values = blk.values
for j, item in enumerate(blk.items):
i = items.get_loc(item)
result[i] = values[j, loc]
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def get(self, item):
_, block = self._find_block(item)
return block.get(item)
def get_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.get_loc(item),
full_loc = item_loc + tuple(ax.get_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.get_loc(item)
new_items = Index(np.delete(np.asarray(self.items), loc))
self._delete_from_block(i, item)
self.set_items_norename(new_items)
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
assert(value.shape[1:] == self.shape[1:])
if item in self.items:
i, block = self._find_block(item)
if not block.should_store(value):
# delete from block, create and append new block
self._delete_from_block(i, item)
self._add_new_block(item, value)
else:
block.set(item, value)
else:
# insert at end
self.insert(len(self.items), item, value)
def insert(self, loc, item, value):
if item in self.items:
raise Exception('cannot insert %s, already exists' % item)
new_items = self.items.insert(loc, item)
self.set_items_norename(new_items)
# new block
self._add_new_block(item, value)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
new_left, new_right = block.split_block_at(item)
if new_left is not None:
self.blocks.append(new_left)
if new_right is not None:
self.blocks.append(new_right)
def _add_new_block(self, item, value):
# Do we care about dtype at the moment?
# hm, elaborate hack?
loc = self.items.get_loc(item)
new_block = make_block(value, self.items[loc:loc+1].copy(),
self.items)
self.blocks.append(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % str(item))
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if copy:
result = self.copy(deep=True)
result.axes[axis] = new_axis
return result
else:
return self
if axis == 0:
assert(method is None)
return self.reindex_items(new_axis)
new_axis, indexer = cur_axis.reindex(new_axis, method)
return self.reindex_indexer(new_axis, indexer, axis=axis)
def reindex_indexer(self, new_axis, indexer, axis=1):
"""
Reindex along `axis` with a pandas-style indexer in which missing labels are marked by -1.
"""
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer)
mask = indexer == -1
# TODO: deal with length-0 case? or does it fall out?
needs_masking = len(new_axis) > 0 and mask.any()
new_blocks = []
for block in self.blocks:
newb = block.reindex_axis(indexer, mask, needs_masking,
axis=axis)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def _reindex_indexer_items(self, new_items, indexer):
# TODO: less efficient than I'd like
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
# update with observed items
mask |= selector
if not selector.any():
continue
new_block_items = new_items.take(selector.nonzero()[0])
new_values = com.take_fast(blk.values, blk_indexer[selector],
None, False, axis=0)
new_blocks.append(make_block(new_values, new_block_items,
new_items))
if not mask.all():
na_items = new_items[-mask]
na_block = self._make_na_block(na_items, new_items)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def reindex_items(self, new_items, copy=True):
"""
"""
new_items = _ensure_index(new_items)
data = self
if not data.is_consolidated():
data = data.consolidate()
return data.reindex_items(new_items)
# TODO: this part could be faster (!)
new_items, indexer = self.items.reindex(new_items)
# could have some pathological (MultiIndex) issues here
new_blocks = []
if indexer is None:
for blk in self.blocks:
if copy:
new_blocks.append(blk.reindex_items_from(new_items))
else:
new_blocks.append(blk)
else:
for block in self.blocks:
newb = block.reindex_items_from(new_items, copy=copy)
if len(newb.items) > 0:
new_blocks.append(newb)
mask = indexer == -1
if mask.any():
extra_items = new_items[mask]
na_block = self._make_na_block(extra_items, new_items)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _make_na_block(self, items, ref_items):
block_shape = list(self.shape)
block_shape[0] = len(items)
block_values = np.empty(block_shape, dtype=np.float64)
block_values.fill(nan)
na_block = make_block(block_values, items, ref_items,
do_integrity_check=True)
return na_block
def take(self, indexer, axis=1):
if axis == 0:
raise NotImplementedError
indexer = np.asarray(indexer, dtype='i4')
n = len(self.axes[axis])
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be non-negative and less than '
'the axis length')
new_axes = list(self.axes)
new_axes[axis] = self.axes[axis].take(indexer)
new_blocks = []
for blk in self.blocks:
new_values = com.take_fast(blk.values, indexer,
None, False, axis=axis)
newb = make_block(new_values, blk.items, self.items)
new_blocks.append(newb)
return BlockManager(new_blocks, new_axes)
def merge(self, other, lsuffix=None, rsuffix=None):
assert(self._is_indexed_like(other))
this, other = self._maybe_rename_join(other, lsuffix, rsuffix)
cons_items = this.items + other.items
consolidated = _consolidate(this.blocks + other.blocks, cons_items)
new_axes = list(this.axes)
new_axes[0] = cons_items
return BlockManager(consolidated, new_axes)
def _maybe_rename_join(self, other, lsuffix, rsuffix, exclude=None,
copydata=True):
to_rename = self.items.intersection(other.items)
if exclude is not None and len(exclude) > 0:
to_rename = to_rename - exclude
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise Exception('columns overlap: %s' % to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
# XXX: COPIES DATA!
this = self.rename_items(lrenamer, copydata=copydata)
other = other.rename_items(rrenamer, copydata=copydata)
else:
this = self
return this, other
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
assert(self.ndim == other.ndim)
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def rename_axis(self, mapper, axis=1):
new_axis = Index([mapper(x) for x in self.axes[axis]])
new_axis._verify_integrity()
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(self.blocks, new_axes)
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
new_items._verify_integrity()
new_blocks = []
for block in self.blocks:
newb = block.copy(deep=copydata)
newb.set_ref_items(new_items, maybe_rename=True)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes)
def add_prefix(self, prefix):
f = (('%s' % prefix) + '%s').__mod__
return self.rename_items(f)
def add_suffix(self, suffix):
f = ('%s' + ('%s' % suffix)).__mod__
return self.rename_items(f)
def fillna(self, value):
"""
"""
new_blocks = [b.fillna(value) for b in self.blocks]
return BlockManager(new_blocks, self.axes)
@property
def block_id_vector(self):
# TODO
result = np.empty(len(self.items), dtype=int)
result.fill(-1)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
assert((indexer != -1).all())
result.put(indexer, i)
assert((result >= 0).all())
return result
@property
def item_dtypes(self):
result = np.empty(len(self.items), dtype='O')
mask = np.zeros(len(self.items), dtype=bool)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
result.put(indexer, blk.values.dtype.name)
mask.put(indexer, 1)
assert(mask.all())
return result
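# Illustrative sketch (assumes it is run inside this module): a BlockManager is
# a list of dtype-homogeneous blocks plus the shared axes. Each block's items
# must be drawn from axes[0] and its values must match the remaining axes.
def _example_blockmanager_construction():
    items = Index(['a', 'b', 'c'])
    index = Index([0, 1, 2, 3])
    float_block = make_block(np.random.randn(2, 4), ['a', 'b'], items)
    int_block = make_block(np.arange(4, dtype='int64').reshape(1, 4), ['c'], items)
    mgr = BlockManager([float_block, int_block], [items, index])
    assert mgr.shape == (3, 4)
    assert mgr.is_consolidated()
    return mgr.get('c')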
def form_blocks(data, axes):
# pre-filter out items if we passed it
items = axes[0]
if len(data) < len(items):
extra_items = items - Index(data.keys())
else:
extra_items = []
# put "leftover" items in float bucket, where else?
# generalize?
float_dict = {}
int_dict = {}
bool_dict = {}
object_dict = {}
for k, v in data.iteritems():
if issubclass(v.dtype.type, np.floating):
float_dict[k] = v
elif issubclass(v.dtype.type, np.integer):
int_dict[k] = v
elif v.dtype == np.bool_:
bool_dict[k] = v
else:
object_dict[k] = v
blocks = []
if len(float_dict):
float_block = _simple_blockify(float_dict, items, np.float64)
blocks.append(float_block)
if len(int_dict):
int_block = _simple_blockify(int_dict, items, np.int64)
blocks.append(int_block)
if len(bool_dict):
bool_block = _simple_blockify(bool_dict, items, np.bool_)
blocks.append(bool_block)
if len(object_dict) > 0:
object_block = _simple_blockify(object_dict, items, np.object_)
blocks.append(object_block)
if len(extra_items):
shape = (len(extra_items),) + tuple(len(x) for x in axes[1:])
block_values = np.empty(shape, dtype=float)
block_values.fill(nan)
na_block = make_block(block_values, extra_items, items,
do_integrity_check=True)
blocks.append(na_block)
blocks = _consolidate(blocks, items)
return blocks
def _simple_blockify(dct, ref_items, dtype):
block_items, values = _stack_dict(dct, ref_items, dtype)
# CHECK DTYPE?
if values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
return make_block(values, block_items, ref_items, do_integrity_check=True)
def _stack_dict(dct, ref_items, dtype):
from pandas.core.series import Series
# fml
def _asarray_compat(x):
# asarray shouldn't be called on SparseSeries
if isinstance(x, Series):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
# sparseseries
if isinstance(x, Series):
return len(x),
else:
return x.shape
items = [x for x in ref_items if x in dct]
first = dct[items[0]]
shape = (len(dct),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, item in enumerate(items):
stacked[i] = _asarray_compat(dct[item])
# stacked = np.vstack([_asarray_compat(dct[k]) for k in items])
return items, stacked
def _blocks_to_series_dict(blocks, index=None):
from pandas.core.series import Series
series_dict = {}
for block in blocks:
for item, vec in zip(block.items, block.values):
series_dict[item] = Series(vec, index=index, name=item)
return series_dict
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b = b.sort_values()  # sort_values is not in-place; keep the sorted result
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
def test_constructor_cast(self):
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
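# Illustrative sketch (not part of the original suite; the test above is cut off
# here). A minimal, self-contained version of the per-item dtype check that
# test_constructor_dtypes performs on each constructed Panel:
def _example_panel_dtype_check():
    panel = Panel(np.random.randn(2, 10, 5), dtype='float32')
    for item in panel.items:
        assert panel[item].values.dtype.name == 'float32'
    return panel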
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
This script is used to train the Logistic Regression model on dataset Testset1 & Testset2
Testset1(1st_test):
bearing 7(bearing 4, y axis), Fail case
bearing 2(bearing 2, x axis), Pass case
Testset2(2nd_test):
bearing 0(bearing 1 ), Fail case
bearing 1(bearing 2), Pass case
"""
import pandas as pd
import numpy as np
import sys
sys.path.insert(0, '../')
from utils import cal_Labels,cal_max_freq,create_dataframe
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from influxdb import DataFrameClient
from sklearn import metrics
import json
import os
testset1_dbname = "1st_test"
testset2_dbname = "2nd_test"
# Connect to InfluxDB for loading the 1st_test dataset
testset1_influxdb_client = DataFrameClient("localhost", "8086", "admin", "admin", testset1_dbname)
# initialise empty frames for the five frequency components
testset1_freq_comp1, testset1_freq_comp2, testset1_freq_comp3, testset1_freq_comp4, testset1_freq_comp5 = (
    pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame())
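# Illustrative sketch (not part of the original script): the downstream training
# flow implied by the imports above. The feature frame and its "label" column
# are assumptions for illustration only -- in the real script the features come
# from cal_max_freq()/create_dataframe() applied to the frequency components
# loaded from InfluxDB for the pass/fail bearings listed in the docstring.
def _example_train_logistic_regression(feature_df):
    # feature_df: rows = observation windows, numeric feature columns plus a
    # binary "label" column (1 = bearing failing, 0 = bearing healthy).
    X = feature_df.drop(columns=["label"])
    y = feature_df["label"]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42)
    model = LogisticRegression()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    return metrics.accuracy_score(y_test, predictions)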
from __future__ import print_function
import sys
import pandas as pd
from pyranges.pyranges import PyRanges
from io import StringIO
import pyranges as pr
from pyranges.version import __version__
def read_bed(f, as_df=False, nrows=None):
"""Return bed file as PyRanges.
This is a reader for files that follow the bed format. They can have from
3-12 columns which will be named like so:
Chromosome Start End Name Score Strand ThickStart ThickEnd ItemRGB
BlockCount BlockSizes BlockStarts
Parameters
----------
f : str
Path to bed file
as_df : bool, default False
Whether to return as pandas DataFrame instead of PyRanges.
nrows : int, default None
Number of rows to return.
Notes
-----
If you just want to create a PyRanges from a tab-delimited bed-like file,
use `pr.PyRanges(pandas.read_table(f))` instead.
Examples
--------
>>> path = pr.get_example_path("aorta.bed")
>>> pr.read_bed(path, nrows=5)
+--------------+-----------+-----------+------------+-----------+--------------+
| Chromosome | Start | End | Name | Score | Strand |
| (category) | (int32) | (int32) | (object) | (int64) | (category) |
|--------------+-----------+-----------+------------+-----------+--------------|
| chr1 | 9939 | 10138 | H3K27me3 | 7 | + |
| chr1 | 9953 | 10152 | H3K27me3 | 5 | + |
| chr1 | 9916 | 10115 | H3K27me3 | 5 | - |
| chr1 | 9951 | 10150 | H3K27me3 | 8 | - |
| chr1 | 9978 | 10177 | H3K27me3 | 7 | - |
+--------------+-----------+-----------+------------+-----------+--------------+
Stranded PyRanges object has 5 rows and 6 columns from 1 chromosomes.
For printing, the PyRanges was sorted on Chromosome and Strand.
>>> pr.read_bed(path, as_df=True, nrows=5)
Chromosome Start End Name Score Strand
0 chr1 9916 10115 H3K27me3 5 -
1 chr1 9939 10138 H3K27me3 7 +
2 chr1 9951 10150 H3K27me3 8 -
3 chr1 9953 10152 H3K27me3 5 +
4 chr1 9978 10177 H3K27me3 7 -
"""
columns = "Chromosome Start End Name Score Strand ThickStart ThickEnd ItemRGB BlockCount BlockSizes BlockStarts".split(
)
if f.endswith(".gz"):
import gzip
first_start = gzip.open(f).readline().split()[1]
else:
first_start = open(f).readline().split()[1]
header = None
try:
int(first_start)
except ValueError:
header = 0
df = pd.read_csv(
f,
dtype={
"Chromosome": "category",
"Strand": "category"
},
nrows=nrows,
header=header,
sep="\t")
df.columns = columns[:df.shape[1]]
if not as_df:
return PyRanges(df)
else:
return df
def read_bam(f, sparse=True, as_df=False, mapq=0, required_flag=0, filter_flag=1540):
"""Return bam file as PyRanges.
Parameters
----------
f : str
Path to bam file
sparse : bool, default True
Whether to return only.
as_df : bool, default False
Whether to return as pandas DataFrame instead of PyRanges.
mapq : int, default 0
Minimum mapping quality score.
required_flag : int, default 0
Flags which must be present for the interval to be read.
filter_flag : int, default 1540
Ignore reads with these flags. Default 1540, which means that either
the read is unmapped, the read failed vendor or platfrom quality
checks, or the read is a PCR or optical duplicate.
Notes
-----
This functionality requires the library `bamread`. It can be installed with
`pip install bamread` or `conda install -c bioconda bamread`.
Examples
--------
>>> path = pr.get_example_path("control.bam")
>>> pr.read_bam(path)
+--------------+-----------+-----------+--------------+------------+
| Chromosome | Start | End | Strand | Flag |
| (category) | (int32) | (int32) | (category) | (uint16) |
|--------------+-----------+-----------+--------------+------------|
| chr1 | 887771 | 887796 | + | 16 |
| chr1 | 994660 | 994685 | + | 16 |
| chr1 | 1770383 | 1770408 | + | 16 |
| chr1 | 1995141 | 1995166 | + | 16 |
| ... | ... | ... | ... | ... |
| chrY | 57402214 | 57402239 | + | 16 |
| chrY | 10643526 | 10643551 | - | 0 |
| chrY | 11776321 | 11776346 | - | 0 |
| chrY | 20557165 | 20557190 | - | 0 |
+--------------+-----------+-----------+--------------+------------+
Stranded PyRanges object has 10,000 rows and 5 columns from 25 chromosomes.
For printing, the PyRanges was sorted on Chromosome and Strand.
>>> pr.read_bam(path, sparse=False, as_df=True)
Chromosome Start End Strand Flag QueryStart QueryEnd Name Cigar Quality
0 chr1 887771 887796 + 16 0 25 U0 25M None
1 chr1 994660 994685 + 16 0 25 U0 25M None
2 chr1 1041102 1041127 - 0 0 25 U0 25M None
3 chr1 1770383 1770408 + 16 0 25 U0 25M None
4 chr1 1995141 1995166 + 16 0 25 U0 25M None
... ... ... ... ... ... ... ... ... ... ...
9995 chrM 3654 3679 - 0 0 25 U0 25M None
9996 chrM 3900 3925 + 16 0 25 U0 25M None
9997 chrM 13006 13031 + 16 0 25 U0 25M None
9998 chrM 14257 14282 - 0 0 25 U0 25M None
9999 chrM 14257 14282 - 0 0 25 U0 25M None
<BLANKLINE>
[10000 rows x 10 columns]
"""
try:
import bamread
except ModuleNotFoundError as e:
print("bamread must be installed to read bam. Use `conda install -c bioconda bamread` or `pip install bamread` to install it.")
sys.exit(1)
if sparse:
df = bamread.read_bam(f, mapq, required_flag, filter_flag)
else:
try:
df = bamread.read_bam_full(f, mapq, required_flag, filter_flag)
except AttributeError:
print("bamread version 0.0.6 or higher is required to read bam non-sparsely.")
if as_df:
return df
else:
return PyRanges(df)
# return bamread.read_bam(f, mapq, required_flag, filter_flag)
def _fetch_gene_transcript_exon_id(attribute, annotation=None):
no_quotes = attribute.str.replace('"', '').str.replace("'", "")
df = no_quotes.str.extract(
"gene_id.?(.+?);(?:.*transcript_id.?(.+?);)?(?:.*exon_number.?(.+?);)?(?:.*exon_id.?(.+?);)?",
expand=True) # .iloc[:, [1, 2, 3]]
df.columns = "gene_id transcript_id exon_number exon_id".split()
if annotation == "ensembl":
newdf = []
for c in "gene_id transcript_id exon_id".split():
r = df[c].astype(str).str.extract(r'(\d+)').astype(float)
newdf.append(r)
newdf = pd.concat(newdf, axis=1)
newdf.insert(2, "exon_number", df["exon_number"])
df = newdf
return df
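# Illustrative sketch (not part of the original module): what the regex in
# _fetch_gene_transcript_exon_id pulls out of a single GTF attribute string.
# The attribute text below is made up for illustration.
def _example_fetch_ids():
    attributes = pd.Series(
        ['gene_id "ENSG000001"; transcript_id "ENST000001"; '
         'exon_number "1"; exon_id "ENSE000001";'])
    extracted = _fetch_gene_transcript_exon_id(attributes)
    assert list(extracted.columns) == [
        "gene_id", "transcript_id", "exon_number", "exon_id"]
    return extracted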
def skiprows(f):
try:
import gzip
fh = gzip.open(f)
for i, l in enumerate(fh):
if l.decode()[0] != "#":
break
except (OSError, TypeError): # not a gzipped file, or StringIO
fh = open(f)
for i, l in enumerate(fh):
if l[0] != "#":
break
fh.close()
return i
def read_gtf(f, full=True, as_df=False, nrows=None, duplicate_attr=False):
"""Read files in the Gene Transfer Format.
Parameters
----------
f : str
Path to GTF file.
as_df : bool, default False
Whether to return as pandas DataFrame instead of PyRanges.
nrows : int, default None
Number of rows to read. Default None, i.e. all.
duplicate_attr : bool, default False
Whether to handle (potential) duplicate attributes or just keep last one.
See Also
--------
pyranges.read_gff3 : read files in the General Feature Format
Examples
--------
>>> path = pr.get_example_path("ensembl.gtf")
>>> gr = pr.read_gtf(path)
>>> # +--------------+------------+--------------+-----------+-----------+------------+--------------+------------+-----------------+----------------+-------+
>>> # | Chromosome | Source | Feature | Start | End | Score | Strand | Frame | gene_id | gene_version | +18 |
>>> # | (category) | (object) | (category) | (int32) | (int32) | (object) | (category) | (object) | (object) | (object) | ... |
>>> # |--------------+------------+--------------+-----------+-----------+------------+--------------+------------+-----------------+----------------+-------|
>>> # | 1 | havana | gene | 11868 | 14409 | . | + | . | ENSG00000223972 | 5 | ... |
>>> # | 1 | havana | transcript | 11868 | 14409 | . | + | . | ENSG00000223972 | 5 | ... |
>>> # | 1 | havana | exon | 11868 | 12227 | . | + | . | ENSG00000223972 | 5 | ... |
>>> # | 1 | havana | exon | 12612 | 12721 | . | + | . | ENSG00000223972 | 5 | ... |
>>> # | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
>>> # | 1 | ensembl | transcript | 120724 | 133723 | . | - | . | ENSG00000238009 | 6 | ... |
>>> # | 1 | ensembl | exon | 133373 | 133723 | . | - | . | ENSG00000238009 | 6 | ... |
>>> # | 1 | ensembl | exon | 129054 | 129223 | . | - | . | ENSG00000238009 | 6 | ... |
>>> # | 1 | ensembl | exon | 120873 | 120932 | . | - | . | ENSG00000238009 | 6 | ... |
>>> # +--------------+------------+--------------+-----------+-----------+------------+--------------+------------+-----------------+----------------+-------+
>>> # Stranded PyRanges object has 95 rows and 28 columns from 1 chromosomes.
>>> # For printing, the PyRanges was sorted on Chromosome and Strand.
>>> # 18 hidden columns: gene_name, gene_source, gene_biotype, transcript_id, transcript_version, transcript_name, transcript_source, transcript_biotype, tag, transcript_support_level, ... (+ 8 more.)
"""
_skiprows = skiprows(f)
gr = read_gtf_full(f, as_df, nrows, _skiprows, duplicate_attr)
return gr
def read_gtf_full(f, as_df=False, nrows=None, skiprows=0, duplicate_attr=False):
dtypes = {
"Chromosome": "category",
"Feature": "category",
"Strand": "category"
}
names = "Chromosome Source Feature Start End Score Strand Frame Attribute".split(
)
# names = "Chromosome Start End Score Strand Source Feature Frame Attribute".split()
df_iter = pd.read_csv(
f,
sep="\t",
header=None,
names=names,
dtype=dtypes,
chunksize=int(1e5),
skiprows=skiprows,
nrows=nrows)
_to_rows = to_rows_keep_duplicates if duplicate_attr else to_rows
dfs = []
for df in df_iter:
extra = _to_rows(df.Attribute)
df = df.drop("Attribute", axis=1)
ndf = pd.concat([df, extra], axis=1, sort=False)
dfs.append(ndf)
df = pd.concat(dfs, sort=False)
df.loc[:, "Start"] = df.Start - 1
if not as_df:
return PyRanges(df)
else:
return df
def to_rows(anno):
rowdicts = []
for l in anno:
l = l.replace('"', '').replace(";", "").split()
rowdicts.append({k: v for k, v in zip(*([iter(l)] * 2))})
return pd.DataFrame.from_dict(rowdicts).set_index(anno.index)
def to_rows_keep_duplicates(anno):
rowdicts = []
for l in anno:
rowdict = {}
l = l.replace('"', '').replace(";", "").split()
for k, v in zip(*([iter(l)] * 2)):
if k not in rowdict:
rowdict[k] = v
elif k in rowdict and isinstance(rowdict[k], list):
rowdict[k].append(v)
else:
rowdict[k] = [rowdict[k], v]
rowdicts.append({
k: ','.join(v) if isinstance(v, list) else v
for k, v in rowdict.items()
})
return pd.DataFrame.from_dict(rowdicts).set_index(anno.index)
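# Illustrative sketch (not part of the original module): with the duplicate-aware
# parser, repeated attribute keys (for example several "tag" entries on one GTF
# line) are collapsed into a single comma-joined value instead of keeping only
# the last occurrence. The attribute text is made up for illustration.
def _example_duplicate_attributes():
    attributes = pd.Series(['gene_id "g1"; tag "basic"; tag "CCDS";'])
    parsed = to_rows_keep_duplicates(attributes)
    assert parsed.loc[0, "tag"] == "basic,CCDS"
    return parsed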
def read_gtf_restricted(f,
as_df=False,
skiprows=0,
nrows=None):
"""seqname - name of the chromosome or scaffold; chromosome names can be given with or without the 'chr' prefix. Important note: the seqname must be one used within Ensembl, i.e. a standard chromosome name or an Ensembl identifier such as a scaffold ID, without any additional content such as species or assembly. See the example GFF output below.
# source - name of the program that generated this feature, or the data source (database or project name)
feature - feature type name, e.g. Gene, Variation, Similarity
start - Start position of the feature, with sequence numbering starting at 1.
end - End position of the feature, with sequence numbering starting at 1.
score - A floating point value.
strand - defined as + (forward) or - (reverse).
# frame - One of '0', '1' or '2'. '0' indicates that the first base of the feature is the first base of a codon, '1' that the second base is the first base of a codon, and so on..
attribute - A semicolon-separated list of tag-value pairs, providing additional information about each feature."""
dtypes = {
"Chromosome": "category",
"Feature": "category",
"Strand": "category"
}
df_iter = pd.read_csv(
f,
sep="\t",
comment="#",
usecols=[0, 2, 3, 4, 5, 6, 8],
header=None,
names="Chromosome Feature Start End Score Strand Attribute".split(),
dtype=dtypes,
chunksize=int(1e5),
nrows=nrows)
dfs = []
for df in df_iter:
# Since Start is 1-indexed
df.Start -= 1
if sum(df.Score == ".") == len(df):
cols_to_concat = "Chromosome Start End Strand Feature".split()
else:
cols_to_concat = "Chromosome Start End Strand Feature Score".split(
)
extract = _fetch_gene_transcript_exon_id(df.Attribute)
extract.columns = "gene_id transcript_id exon_number exon_id".split()
extract.exon_number = extract.exon_number.astype(float)
df = | pd.concat([df[cols_to_concat], extract], axis=1, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This module contains methods for constructing timeseries figures.
================================================================================
@Author:
| <NAME>, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Mon Jan 27 08:49:12 2020
Last Updated:
Wed Jul 28 14:19:13 2021
"""
import os
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib.patches import FancyBboxPatch
from matplotlib.colors import rgb2hex
import seaborn as sns
from sensortoolkit.param import Parameter
from sensortoolkit.datetime_utils import get_todays_date
from sensortoolkit.plotting import wrap_text, get_colormap_range
from sensortoolkit.deploy import get_max_conc
register_matplotlib_converters()
def sensor_timeplot(df_list, ref_df, param=None, sensor_name=None,
figure_path=None, bdate=None, edate=None,
write_to_file=True, sensor_serials=None, ref_name=None,
averaging_interval=None, return_mpl_obj=True,
report_fmt=False, ax=None, fig=None, **kwargs):
"""Generate a timeplot for a specified pollutant alongside FRM/FEM
concentration values.
Args:
df_list (list):
List of Pandas dataframes containing sensor data.
ref_df (pandas DataFrame):
Reference dataset.
param (str, optional):
Column header name for the pollutant values to be plotted. Defaults
to None.
sensor_name (str, optional):
Unformatted sensor name, passed to Formatted_Sensor_Name() for
including formatted version on plot. Defaults to None.
figure_path (str, optional):
Path to directory where the figure will be saved. Defaults to None.
bdate (str, optional):
Date ('yyyy-mm-dd' format) for beginning of timeseries plot.
Defaults to None.
edate (str, optional):
Date ('yyyy-mm-dd' format) for end of timeseries plot.
Defaults to None.
write_to_file (bool, optional):
If true, figure is written to file and interactive plot is closed.
Defaults to True.
sensor_serials (dict, optional):
Optional pass sensor serials dictionary to plot sensor serial IDs
in the legend instead of numbered sensors. Defaults to None.
ref_name (str, optional):
The name of the FRM/FEM monitor (make and model). Defaults to None.
averaging_interval (str, optional):
The measurement averaging intervals commonly utilized for analyzing
data corresponding to the selected parameter. Defaults to None.
return_mpl_obj (bool, optional):
If true, will return a Matplotlib axes instance (useful for
iteration over subplot axes if this plotting function is called
within a for-loop that is iterating over the axes elements in
a Matplotlib subplot object). Defaults to True.
report_fmt (bool, optional):
If true, select formatting presets for displaying figures on the
reporting template for sensor performance evaluations included
alongside US EPA's performance targets documents for air sensors.
Defaults to False.
ax (matplotlib.axes._subplots.AxesSubplot, optional):
Optional, the Matplotlib Axes object on which plotting elements
will be drawn. Useful if the user is iterating over the Axes
elements of a Matplotlib figure in a for-loop outside this plotting
function. Within the loop, calls to this function can be made to add
elements for each axes object. Defaults to None.
fig (Matplotlib.figure.Figure, optional):
Optional, the Matplotlib figure on which axes object elements are
drawn. Useful if the user is iterating over the Axes elements of a
Matplotlib figure in a for-loop outside this plotting function.
Within the loop, calls to this function can be made to add elements
for each axes object. Defaults to None.
**Keyword Arguments:**
:param str seaborn_style:
The plotting style based on seaborn's style options. Defaults to
'darkgrid'. Values must be a valid seaborn style name.
:param int date_interval:
Number of days between x-axis tick marks with 'mm-dd-yy' timestamps.
:param str yscale:
The scaling for the y-axis. Accepted values include 'linear',
'log', 'symlog', 'logit', etc.
:param ylims:
Set the y-limits of the plot.
:type ylims: Two-element tuple of floats or ints
:param bool format_xaxis_weeks:
Plot the timeseries x-axis (time) in increments of 1 week. Defaults
to False.
:param fig_size:
The dimensions (width, height) in inches of the Matplotlib figure to
be drawn. Defaults to (16, 3.5).
:type fig_size: Two-element tuple
:param fontsize:
The font size for the xlabel, ylabel, and plot text. Passed on to
Draw_Scatter() which uses 0.85*font_size for tick labels.
:type fontsize: int or float
:param float legend_fontscale:
Relative scale of fontsize for text in the legend relative to label
text.
:param str cmap_name:
The name of the Matplotlib colormap that will be used when drawing
plot elements. Defaults to ``'Set1'``. A full list of colormaps
can be found in the `Matplotlib documentation <https://matplotlib.org/stable/gallery/color/colormap_reference.html>`_
:param cmap_normrange:
Normalized range (0,1) for colormap hue selection. Limiting this
range to something like (0.1, 0.9) is useful when using colormaps
with high contrast extrema and a gradual change in hue is desired
for plots.
:type cmap_normrange: Two-element tuple
:param bool show_title:
If true, display the title for the figure. Defaults to True.
:param str filename_suffix:
Optional string added to end of filename. Defaults to empty string.
:param float box_xscale:
Scalar value for translating the x-coordinates of the Matplotlib
axes object box in which the plot is drawn. Passed to the
``Matplotlib.Axes.set_position()`` method for adjusting the axes
box dimensions relative to the Matplotlib figure coordinates.
:param float box_yscale:
Scalar value for translating the y-coordinates of the Matplotlib
axes object box in which the plot is drawn. Passed to the
``Matplotlib.Axes.set_position()`` method for adjusting the axes
box dimensions relative to the Matplotlib figure coordinates.
:param float box_wscale:
Scalar value for transforming the x-range (width) of the Matplotlib
axes object box in which the plot is drawn. Passed to the
``Matplotlib.Axes.set_position()`` method for adjusting the axes
box dimensions relative to the Matplotlib figure coordinates.
:param float box_hscale:
Scalar value for transforming the y-range (height) of the Matplotlib
axes object box in which the plot is drawn. Passed to the
``Matplotlib.Axes.set_position()`` method for adjusting the axes
box dimensions relative to the Matplotlib figure coordinates.
:param legend_loc:
The x and y coordinate of center of the legend (relative to the axes
object coordinates).
:type legend_loc: Two-element tuple
:param list sensor_colors:
Set the list of colors for drawing sensor line plots. Default set by
chosen colormap (cmap_name) and the normalized range for the colormap.
:param float sensor_linealpha:
The transparency of the lines indicating sensor measurements. Defaults
to 0.70.
:param float sensor_linewidth:
The width (thickness) of the lines indicating sensor measurements.
Defaults to 1.5.
:param str sensor_linestyle:
The style of the lines indicating sensor measurements. Passed
to matplotlib linestyles. Defaults to '-'.
:param str ref_linecolor:
The color of the line indicating reference measurements. Defaults
to 'k'.
:param float ref_linealpha:
The transparency of the line indicating reference measurements.
Defaults to 0.97.
:param float ref_linewidth:
The width (thickness) of the line indicating reference measurements.
Defaults to 1.5.
:param str ref_linestyle:
The style of the lines indicating reference measurements. Passed
to matplotlib linestyles. Defaults to '-'.
:param str date_format:
The strftime format in which dates will be displayed along the x-axis
if 'format_xaxis_weeks' is False. Defaults to "%m-%d-%y".
:param float legend_fontsize:
Value by which to scale the legend text font size relative to the value
of the fontsize argument. Defaults to 0.72.
:param subplots_adjust:
Modify the bounds of the subplot [x-min, x-max, y-max, y-min]. If
unique_ax_obj is True, defaults to (0.05, 0.9, 0.90, 0.15).
:type subplots_adjust: Four-element tuple.
Returns:
ax (matplotlib axes object):
If return_mpl_obj is True or report_fmt is True, return ax object,
else return None.
"""
# Determine maximum concentration recorded during timeframe, use to set
# default ylim
max_conc = get_max_conc(param, df_list=df_list, ref_df=ref_df,
bdate=bdate, edate=edate)
cmap_range = get_colormap_range(df_list)
# Get keyword argument values if specified, otherwise set default
sns.set_style(kwargs.get('seaborn_style', 'darkgrid'))
date_interval = kwargs.get('date_interval', 5)
yscale = kwargs.get('yscale', 'linear')
ylims = kwargs.get('ylims', (-1, 1.25*max_conc))
format_xaxis_weeks = kwargs.get('format_xaxis_weeks', False)
fig_size = kwargs.get('fig_size', (16, 3.5))
fontsize = kwargs.get('fontsize', 15)
legend_fontscale = kwargs.get('legend_fontscale', 0.72)
cmap_name = kwargs.get('cmap_name', 'Set1')
cmap_norm_range = kwargs.get('cmap_normrange', cmap_range)
show_title = kwargs.get('show_title', True)
filename_suffix = kwargs.get('filename_suffix', '')
# Format parameter name and sensor name
param_obj = Parameter(param)
param_name = param_obj.name
param_averaging = param_obj.averaging
fmt_param = param_obj.format_name
fmt_param_units = param_obj.units
fmt_sensor_name = sensor_name.replace('_', ' ')
# Performance target reporting template formatting for timeseries plots
if report_fmt is True:
# Figure consists of two plots (1-hr and 24-hr averaged
# timeseries [plots arranged as 2 rows, 1 column])
if len(param_averaging) == 2:
fontsize = 10.5
fig_size = (10.15, 4.1)
show_title = True
# Scaling values for axes box
box_xscale = kwargs.get('box_xscale', 0.45) # Translate x-axis loc
box_yscale = kwargs.get('box_yscale', 1.0) # Translate y-axis loc
box_wscale = kwargs.get('box_wscale', 1.0) # Transform plot width
box_hscale = kwargs.get('box_hscale', 1.12) # Transform plot height
title_xpos = 0.5
# x, y loc of legend (w.r.t axes obj)
legend_pos = kwargs.get('legend_loc', (1.11, 1.29))
columnspacing = 0.9 # Legend column spacing
# Figure consists of one timeseries plot (1-hr)
# [plot arranged as 1 row, 1 column])
elif len(param_averaging) == 1:
fontsize = 11
fig_size = (10.16, 3.8)
show_title = False
box_xscale = kwargs.get('box_xscale', 1.1) # Translate x-axis loc
box_yscale = kwargs.get('box_yscale', 3.1) # Translate y-axis loc
box_wscale = kwargs.get('box_wscale', 1.02) # Transform plot width
box_hscale = kwargs.get('box_hscale', 0.67) # Transform plot height
# Modify the bounds of the subplot [x_l, x_r, y_u, y_l]
kwargs['subplots_adjust'] = (0.05, 0.98, 0.95, 0.5)
title_xpos = 0.5
legend_pos = kwargs.get('legend_loc', (0.85, -0.55))
columnspacing = 0.9
# reassign report_fmt false so that generic formatting is selected.
else:
report_fmt = False
# Generic figure formatting
if report_fmt is False:
box_xscale = kwargs.get('box_xscale', 1.0)
box_yscale = kwargs.get('box_yscale', 1.2)
box_wscale = kwargs.get('box_wscale', 0.94)
box_hscale = kwargs.get('box_hscale', 0.94)
title_xpos = kwargs.get('title_xloc', 0.5)
legend_pos = kwargs.get('legend_loc', (1.06, 0.5))
columnspacing = 1
# Format the legend, determine how many columns to split legend into
n_legend_objs = len(sensor_serials) + 1
if n_legend_objs / 4 > 1:
leg_ncol = 2
else:
leg_ncol = 1
# Use only one legend column if serial IDs are long
if max(len(i) for i in sensor_serials.values()) > 6:
leg_ncol = 1
if ax is None or fig is None:
# No axes object passed to function, create unique fig, axes objects
fig, ax = plt.subplots(1, 1, figsize=fig_size)
unique_ax_obj = True
else:
# Axes object passed to function, set axes within scope of function to
# passed axes object.
ax = ax
unique_ax_obj = False
if show_title is True:
title_str = (averaging_interval + " Averaged " + fmt_sensor_name + ' '
+ fmt_param)
ax.set_title(title_str, fontsize=fontsize*1.1, x=title_xpos)
# Set the colormap and configure the range of hues that will be sampled
if len(df_list) == 1:
monocolor = kwargs.get('monocolor', '#0048AD')
colors = [monocolor]
else:
colormap = plt.cm.get_cmap(cmap_name)
cmap_lbound = cmap_norm_range[0]
cmap_ubound = cmap_norm_range[1]
colors = [colormap(i) for i in np.linspace(cmap_lbound, cmap_ubound,
len(df_list))]
ax.set_prop_cycle('color', kwargs.get('sensor_colors', colors))
# Loop through sensor dataframes, check data present, plot data
for i, df in enumerate(df_list):
try:
param_data = df[param + '_Value']
except KeyError as missing_param:
print('...Warning', missing_param, 'not found in dataframe '
'at index ', str(i))
continue
# Choose between serial ID and sensor number labels for plot legend
if sensor_serials:
lbl = list(sensor_serials.values())[i]
else:
lbl = 'Sensor ' + str(i + 1)
# Plot each sensor data time series
ax.plot(df.index, param_data, label=lbl,
alpha=kwargs.get('sensor_linealpha', .70),
linewidth=kwargs.get('sensor_linewidth', 1.5),
linestyle=kwargs.get('sensor_linestyle', '-'))
# Plot timeseries for regulatory monitor corresponding to the pollutant
ax.plot(ref_df.index,
ref_df[param + '_Value'],
label=ref_name,
color=kwargs.get('ref_linecolor', 'k'),
alpha=kwargs.get('ref_linealpha', .97),
linewidth=kwargs.get('ref_linewidth', 1.5),
linestyle=kwargs.get('ref_linestyle', '-'))
# Configure x- and y-axis attributes (scale, labeling, limits, ticks)
ax.set_yscale(yscale)
ax.set_ylabel(f'{fmt_param} ({fmt_param_units})', fontsize=fontsize)
ax.set_xlabel('Date', fontsize=fontsize)
ax.set_xlim(pd.to_datetime(bdate), pd.to_datetime(edate))
if ylims:
ax.set_ylim(ylims[0], ylims[1])
ax.tick_params(labelsize=.75*fontsize)
# Format x-axis by weeks (mark 'Week 1', 'Week 2', etc..)
if format_xaxis_weeks is True:
week_freq = 1 # Initially mark every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=week_freq))
labels = [item.get_text() for item in ax.get_xticklabels()]
# If the number of weekly labels exceeds 15, reduce weekly interval
# until less than 15 left.
while len(labels) > 15:
week_freq += 1
ax.xaxis.set_major_locator(mdates.WeekdayLocator(
interval=week_freq))
labels = [item.get_text() for item in ax.get_xticklabels()]
for i, label in enumerate(labels):
labels[i] = 'Week ' + str(int(week_freq*i+1))
ax.set_xticklabels(labels)
ax.set_xlabel('Duration', fontsize=fontsize)
# Format x-axis by date and time [MM-DD-YY] at specified date interval
else:
date_form = DateFormatter(kwargs.get('date_format', "%m-%d-%y"))
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.DayLocator(interval=date_interval))
# Set legend position, wrap legend text to fit
handles, labels = ax.get_legend_handles_labels()
updated_labels = wrap_text(labels)
ax.legend(handles, updated_labels, bbox_to_anchor=legend_pos, loc='center',
fontsize=kwargs.get('legend_fontsize',
fontsize*legend_fontscale),
ncol=leg_ncol, columnspacing=columnspacing,
handlelength=1.25)
# Adjust axes dimensions to fit width and height of plot
box = ax.get_position()
ax.set_position([box.x0*box_xscale, box.y0*box_yscale,
box.width*box_wscale, box.height*box_hscale])
if unique_ax_obj is True:
subplot_adjust = kwargs.get('subplots_adjust',
(0.05, 0.9, 0.90, 0.15))
fig.subplots_adjust(left=subplot_adjust[0],
right=subplot_adjust[1],
top=subplot_adjust[2],
bottom=subplot_adjust[3])
# Save image to folder at figure_path
if write_to_file is True:
todays_date = get_todays_date()
figure_path = os.path.join(figure_path, param,
f'{sensor_name}_timeseries_{param}_{averaging_interval}')
# Optionally add suffix to filename
if filename_suffix != '':
figure_path = figure_path + '_' + filename_suffix
# Indicate performance targets template formatted, remove time interval
# info if multiple subplots for 1-hr, 24-hr data used
if report_fmt is True or unique_ax_obj is False:
figure_path = figure_path + '_' + 'report_fmt'
figure_path = figure_path.replace('_' + averaging_interval, '')
# Filename suffix for harmonized sensor datasets
if param.startswith('corrected'):
figure_path = figure_path + '_' + 'corrected'
figure_path += '_' + todays_date + '.png'
plt.savefig(figure_path, dpi=300)
plt.close()
if return_mpl_obj is True or report_fmt is True:
return ax
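# Hedged usage sketch (not taken from the sensortoolkit docs): shows one way
# sensor_timeplot() could be called. The 'PM25' parameter name, the
# '<param>_Value' column convention (mirroring the code above), and the serial
# dictionary are assumptions for illustration only.
def _example_sensor_timeplot():
    idx = pd.date_range('2021-01-01', periods=72, freq='H')
    sensor_df = pd.DataFrame({'PM25_Value': np.random.gamma(2., 4., len(idx))},
                             index=idx)
    ref_df = pd.DataFrame({'PM25_Value': np.random.gamma(2., 4., len(idx))},
                          index=idx)
    return sensor_timeplot([sensor_df], ref_df, param='PM25',
                           sensor_name='Example_Sensor',
                           sensor_serials={'1': 'SN001'},
                           ref_name='Hypothetical FEM',
                           averaging_interval='1-hour',
                           bdate='2021-01-01', edate='2021-01-04',
                           write_to_file=False)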
def deployment_timeline(deployment_df, cmap_name='Dark2',
cmap_norm_range=(.0, .75), fontsize=10,
date_interval=1, fig_size=(11, 7),
write_to_file=True, figure_path=None,
tight_layout=False):
"""A horizontal bar chart indicating the timeline during which sensors
were deployed.
Sensors are depicted as stacked, colored horizontal bars spanning the
beginning and end date of the deployment period.
More details about the barh rounding code (lines 70-82) can be found in the
Matplotlib Fancybox demo:
https://matplotlib.org/3.1.1/gallery/shapes_and_collections/fancybox_demo.html
Other discrete colormaps: tab10_r, Dark2, Set2_r, tab20b
Args:
deployment_df (pandas DataFrame):
Dataframe containing the starting and ending timestamp for the
deployment of each air sensor in the testing group.
cmap_name (str, optional):
The name of the Matplotlib colormap that will be used when drawing
plot elements. Defaults to ``'Dark2'``. A full list of colormaps
can be found in the `Matplotlib documentation <https://matplotlib.org/stable/gallery/color/colormap_reference.html>`_
cmap_norm_range (Two-element tuple, optional):
A two-element tuple containing the normalized range of the colormap
values that will be displayed in the figure. The full range of the
selected colormap can be selected by passing (0, 1). Hues will
be selected at equally spaced intervals within the normalized
colormap range specified. Defaults to (.0, .75).
fontsize (int or float, optional):
The font size for text in figure elements. Defaults to 10.
date_interval (int, optional):
The interval (in months) at which dates along the x-axis of the
figure will be indicated. Defaults to 1 (i.e., one month per tick).
fig_size (Two-element tuple, optional):
The dimensions (width, height) in inches of the Matplotlib figure to
be drawn. Defaults to (11, 7).
write_to_file (bool, optional):
If true, the figure will be saved as a png image to the
``[project_path]/figures`` subdirectory. Defaults to True.
figure_path (str):
The full directory path to the folder where figures are saved.
This should be located at ``[project_path]/figures``.
tight_layout (bool, optional):
If True, Matplotlib's ``tight_layout()`` method will be applied to
reduce the padding around the subplot. Defaults to False.
Returns:
None.
"""
unique_types = sorted(deployment_df['Sensor Name'].unique().tolist())
colormap = plt.cm.get_cmap(cmap_name)
cmap_lbound = cmap_norm_range[0]
cmap_ubound = cmap_norm_range[1]
colors = [rgb2hex(colormap(i)[:3]) for i in np.linspace(cmap_lbound,
cmap_ubound,
len(unique_types))]
fig, ax = plt.subplots(1, 1, figsize=fig_size)
wspace = 0.0
hspace = 0.1
left = 0.1
right = 0.99
top = 0.89
bottom = 0.1
fig.subplots_adjust(wspace=wspace,
hspace=hspace,
left=left,
right=right,
top=top,
bottom=bottom)
for sensor_type, c in zip(unique_types, colors):
sensor_type_data = deployment_df.where(
deployment_df['Sensor Name'] ==
sensor_type).dropna()
bdate = mdates.date2num(pd.to_datetime(sensor_type_data.Begin))
edate = mdates.date2num(pd.to_datetime(sensor_type_data.End))
duration = edate - bdate
fmt_sensor_type = sensor_type.replace('_', ' ')
ax.barh(sensor_type_data.Sensor_Serial, duration, .8,
left=bdate, color=c, alpha=.8, label=fmt_sensor_type)
ax.xaxis_date()
ax.legend(fontsize=0.8*fontsize, loc='upper center',
bbox_to_anchor=(0.5, 1.1), ncol=len(unique_types))
# Pad the x-axis a little bit on either side of sensor deployment periods
x_start = pd.to_datetime(
mdates.num2date(ax.get_xlim()[0])).tz_localize(None)
new_x_start = x_start - | pd.offsets.MonthBegin(1) | pandas.offsets.MonthBegin |
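# Note on the padding idiom above (pandas semantics, not sensortoolkit-specific):
# subtracting pd.offsets.MonthBegin(1) rolls a timestamp back to the first day
# of its month (or to the previous month's first day if it already sits on the
# 1st), e.g. Timestamp('2021-03-17') - MonthBegin(1) -> Timestamp('2021-03-01').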
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 10:28:35 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
import pickle
from database import postSQL2gpd,gpd2postSQL
import pandas as pd
xian_epsg=32649 #Xi'an WGS84 / UTM zone 49N
wgs84_epsg=4326
poi_classificationName={
0:"delicacy",
1:"hotel",
2:"shopping",
3:"lifeService",
4:"beauty",
5:"spot",
6:"entertainment",
7:"sports",
8:"education",
9:"media",
10:"medicalTreatment",
11:"carService",
12:"trafficFacilities",
13:"finance",
14:"realEstate",
15:"corporation",
16:"government",
17:"entrance",
18:"naturalFeatures",
}
poi_classificationName_reverse={v:k for k,v in poi_classificationName.items()}
def street_poi_structure(poi,position,distance=300):
from tqdm import tqdm
import pickle,math
import pandas as pd
import numpy as np
import geopandas as gpd
# tqdm.pandas()
poi_num=len(poi_classificationName.keys())
feature_vector=np.zeros(poi_num)
poi_=poi.copy(deep=True)
pos_poi_dict={}
pos_poi_idxes_df=pd.DataFrame(columns=['geometry','frank_e','num'])
pos_poi_feature_vector_df=pd.DataFrame(columns=['geometry']+list(range(poi_num)))
# print(pos_poi_feature_vector)
for idx,row in tqdm(position.iterrows(),total=position.shape[0]):
poi_['within']=poi_.geometry.apply(lambda pt: pt.within(row.geometry.buffer(distance)))
# print(poi_)
poi_selection_df=poi_[poi_['within']==True]
counts=poi_selection_df.level_0.value_counts().to_dict()
num=len(poi_selection_df)
counts_percent={k:v/num for k,v in counts.items()}
# print(counts_percent)
ve=0.0
for v in counts_percent.values():
if v!=0.:
ve-=v*math.log(v)
max_entropy=math.log(num)
frank_e=ve/max_entropy*100
# print(max_entropy,frank_e)
for k,v in counts.items(): # count the occurrences (histogram) of each POI category
poi_name=k.split("_")[-1]
poi_idx=poi_classificationName_reverse[poi_name]
# print(poi_idx,v)
feature_vector[poi_idx]=v
# print(feature_vector)
pos_poi_dict.update({idx:{'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx ,'counts':counts,'counts_percent':counts_percent,'feature_vector':feature_vector,'num':num,'frank_e':frank_e,'geometry':row.geometry}})
pos_poi_idxes_df=pos_poi_idxes_df.append({'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx,'geometry':row.geometry,'frank_e':frank_e,'num':num},ignore_index=True)
feature_vector_dict={i:feature_vector[i] for i in range(len(feature_vector))}
feature_vector_dict.update({'geometry':row.geometry,'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx,})
pos_poi_feature_vector_df=pos_poi_feature_vector_df.append(feature_vector_dict,ignore_index=True)
# if idx==3:break
pos_poi_idxes_gdf=gpd.GeoDataFrame(pos_poi_idxes_df,geometry=pos_poi_idxes_df.geometry,crs=position.crs)
pos_poi_idxes_gdf['num_diff']=pos_poi_idxes_gdf.num.diff()
pos_poi_feature_vector_gdf=gpd.GeoDataFrame(pos_poi_feature_vector_df,geometry=pos_poi_feature_vector_df.geometry,crs=position.crs)
with open('./processed data/pos_poi_dict.pkl','wb') as f:
pickle.dump(pos_poi_dict,f)
return pos_poi_idxes_gdf,pos_poi_feature_vector_gdf
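# Minimal sketch (illustrative only, hypothetical counts) of the evenness index
# computed above: Shannon entropy of the POI-category shares scaled by
# math.log(num), where num is the total POI count inside the buffer, as in the
# loop body.
def _example_frank_e():
    import math
    counts = {'delicacy': 12, 'shopping': 6, 'hotel': 2}   # hypothetical buffer
    num = sum(counts.values())
    shares = {k: v / num for k, v in counts.items()}
    entropy = -sum(p * math.log(p) for p in shares.values() if p > 0.)
    return entropy / math.log(num) * 100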
def poi_feature_clustering(feature_vector,fields,n_clusters=7,feature_analysis=True):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from shapely.geometry import Point
import geopandas as gpd
import pyproj
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.features import Manifold
from sklearn.feature_selection import chi2, SelectKBest, f_classif
from sklearn import preprocessing
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
pts_geometry=feature_vector[['geometry']]
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
# print(connectivity.toarray())
X_=feature_vector[fields].to_numpy()
X=normalize(X_,axis=0, norm='max')
clustering=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters).fit(X)
feature_vector['clustering']=clustering.labels_
#_________________________________________________________________________
if feature_analysis==True:
y=clustering.labels_
selector=SelectKBest(score_func=f_classif, k=len(fields)) #score_func=chi2
selector.fit(X,y)
dfscores = pd.DataFrame(selector.scores_)
dfpvalues=pd.DataFrame(selector.pvalues_)
dfcolumns = pd.DataFrame(fields)
featureScores = | pd.concat([dfcolumns,dfscores,dfpvalues],axis=1) | pandas.concat |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
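# The numeric scheme exercised above: the thousands block encodes the frequency
# group and the remainder the anchor, e.g. 'W-MON' -> 4001, so
# get_freq_group('W-MON') == 4001 // 1000 * 1000 == 4000 (the 'W' group).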
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group( | offsets.Week(weekday=1) | pandas.tseries.offsets.Week |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyfora.pandas_util
import pyfora.algorithms
import pyfora.algorithms.LinearRegression as LinearRegression
import pyfora.pure_modules.pure_pandas as PurePandas
import numpy
import pandas
import pandas.util.testing
import random
class InMemoryPandasTestCases(object):
def checkFramesEqual(self, df1, df2):
pandas.util.testing.assert_frame_equal(df1, df2)
return True
def checkSeriesEqual(self, series1, series2):
pandas.util.testing.assert_series_equal(series1, series2)
return True
def test_pandas_series_basic(self):
s = pandas.Series(range(10))
def f():
return s
self.equivalentEvaluationTest(f)
def test_repeated_dataframe_ctor(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return pandas.DataFrame(df)
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_repeated_series_ctor(self):
s = pandas.Series([1,2,3])
def f():
return pandas.Series(s)
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkSeriesEqual
)
def test_pandas_dataframes_basic(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return df
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_series_indexing_1(self):
s = | pandas.Series(4) | pandas.Series |
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
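# Filename anatomy assumed by the assertions below (PCSOP.081), e.g.
#   Test1_HPOS_ToF01_P1W02 -> Study='Test1', Chromatography='H',
#   Ionisation='POS', Instrument='ToF01', Matrix='P', Plate=1, Well=2;
# optional suffixes mark references (_LTR/_SR/_MR), skipped runs (_x),
# re-runs (_b/_q) and supplemental injections (_2/_9).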
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
#
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
#
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
#
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
assert_series_equal(msData.sampleMetadata['Instrument'], instrument)
##
#
##
reRun = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'b',
'',
'q',
'',
'',
'',
'',
''],
name='Re-Run',
dtype='str')
assert_series_equal(msData.sampleMetadata['Re-Run'], reRun)
##
#
##
suplemental = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'',
'2',
'',
'',
'9',
'',
'',
''],
name='Suplemental Injections',
dtype='str')
assert_series_equal(msData.sampleMetadata['Suplemental Injections'], suplemental)
##
#
##
skipped = pandas.Series([False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False],
name='Skipped',
dtype='bool')
assert_series_equal(msData.sampleMetadata['Skipped'], skipped)
##
#
##
matrix = pandas.Series(['P',
'U',
'S',
'P',
'U',
'S',
'P',
'U',
'S',
'',
'',
'',
'',
'',
''],
name='Matrix',
dtype='str')
assert_series_equal(msData.sampleMetadata['Matrix'], matrix)
##
#
##
well = pandas.Series([2,
3,
4,
5,
6,
5,
2,
3,
4,
1,
2,
1,
-1,
-1,
-1],
name='Well',
dtype='int')
assert_series_equal(msData.sampleMetadata['Well'], well, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Well'].dtype.kind, well.dtype.kind)
##
#
##
plate = pandas.Series([1,
2,
3,
4,
5,
4,
1,
2,
3,
1,
2,
3,
1,
2,
21],
name='Plate',
dtype='int')
assert_series_equal(msData.sampleMetadata['Plate'], plate, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Plate'].dtype.kind, plate.dtype.kind)
##
#
##
batch = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
2.0,
3.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Batch',
dtype='float')
assert_series_equal(msData.sampleMetadata['Batch'], batch)
##
#
##
dilution = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(msData.sampleMetadata['Dilution'], dilution)
##
#
##
assayRole = pandas.Series([AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.Assay,
AssayRole.Assay],
name='AssayRole',
dtype=object)
assert_series_equal(msData.sampleMetadata['AssayRole'], assayRole)
##
#
##
sampleType = pandas.Series([SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.StudyPool,
SampleType.MethodReference,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.ProceduralBlank,
SampleType.StudyPool,
SampleType.StudyPool],
name='SampleType',
dtype=object)
assert_series_equal(msData.sampleMetadata['SampleType'], sampleType)
def test_updateMasks_features(self):
msData = nPYc.MSDataset('', fileType='empty')
msData.Attributes['artifactualFilter'] = True
##
# Variables:
# Good Corr, Good RSD
# Poor Corr, Good RSD
# Good Corr, Poor RSD
# Poor Corr, Poor RSD
# Good Corr, Good RSD, below blank
##
msData.intensityData = numpy.array([[100, 23, 99, 51, 100],
[90, 54, 91, 88, 91],
[50, 34, 48, 77, 49],
[10, 66, 11, 56, 11],
[1, 12, 2, 81, 2],
[50, 51, 2, 12, 49],
[51, 47, 1, 100, 50],
[47, 50, 70, 21, 48],
[51, 49, 77, 91, 50],
[48, 49, 12, 2, 49],
[50, 48, 81, 2, 51],
[54, 53, 121, 52, 53],
[57, 49, 15, 51, 56],
[140, 41, 97, 47, 137],
[52, 60, 42, 60, 48],
[12, 48, 8, 56, 12],
[1, 2, 1, 1.21, 51],
[2, 1, 1.3, 1.3, 63]],
dtype=float)
msData.sampleMetadata = pandas.DataFrame(data=[[100, 1, 1, 1, AssayRole.LinearityReference, SampleType.StudyPool],
[90, 1, 1, 2, AssayRole.LinearityReference, SampleType.StudyPool],
[50, 1, 1, 3, AssayRole.LinearityReference, SampleType.StudyPool],
[10, 1, 1, 4, AssayRole.LinearityReference, SampleType.StudyPool],
[1, 1, 1, 5, AssayRole.LinearityReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank]],
columns=['Dilution', 'Batch', 'Correction Batch', 'Well', 'AssayRole', 'SampleType'])
msData.featureMetadata = pandas.DataFrame(data=[['Feature_1', 0.5, 100., 0.3],
['Feature_2', 0.55, 100.04, 0.3],
['Feature_3', 0.75, 200., 0.1],
['Feature_4', 0.9, 300., 0.1],
['Feature_5', 0.95, 300.08, 0.1]],
columns=['Feature Name','Retention Time','m/z','Peak Width'])
msData.featureMetadata['Exclusion Details'] = None
msData.featureMetadata['User Excluded'] = False
msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=msData.featureMetadata.index)
msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=msData.featureMetadata.index)
msData.initialiseMasks()
with self.subTest(msg='Default Parameters'):
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax RSD threshold'):
expectedFeatureMask = numpy.array([True, False, True, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=90, varianceRatio=0.1, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax correlation threshold'):
expectedFeatureMask = numpy.array([True, True, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter': True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=1.1, corrThreshold=0))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='High variance ratio'):
expectedFeatureMask = numpy.array([False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=100, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(blankThreshold=0.5))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='No blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':False})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Default withArtifactualFiltering'):
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1],[3,4]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData._tempArtifactualLinkageMatrix)
with self.subTest(msg='Altered withArtifactualFiltering parameters'):
expectedArtifactualLinkageMatrix = | pandas.DataFrame(data=[[0,1]],columns=['node1','node2']) | pandas.DataFrame |
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pandas import testing as pdt
from rle_array.autoconversion import auto_convert_to_rle, decompress
from rle_array.dtype import RLEDtype
pytestmark = pytest.mark.filterwarnings("ignore:performance")
@pytest.mark.parametrize(
"orig, threshold, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": | pd.Series([True], dtype=np.bool_) | pandas.Series |
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dateset3: 20160701~20160731 (113640),features3 from 20160315~20160630 (off_test)
dateset2: 20160515~20160615 (258446),features2 from 20160201~20160514
dateset1: 20160414~20160514 (138303),features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
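# Worked example (hypothetical dates) for the two helpers above:
#   get_day_gap_before('20160601-20160520:20160610') -> 12
#     (nearest earlier receipt of the same coupon, 2016-05-20, is 12 days back)
#   get_day_gap_after('20160601-20160520:20160610') -> 9
#     (nearest later receipt, 2016-06-10, is 9 days ahead)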
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
# is_firstlastone is already defined above; reuse it here for dataset2
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
# get_day_gap_before / get_day_gap_after are already defined above; reuse them for dataset2
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
# is_firstlastone is already defined above; reuse it here for dataset1
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
# get_day_gap_before / get_day_gap_after are already defined above; reuse them for dataset1
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
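# Illustrative behaviour of the discount helpers (raw values are examples):
#   '150:20' (man-jian: spend 150, save 20) -> calc_discount_rate ~0.8667,
#            get_discount_man 150, get_discount_jian 20, is_man_jian 1
#   '0.95'   (plain discount rate)          -> calc_discount_rate 0.95,
#            get_discount_man 'null', get_discount_jian 'null', is_man_jian 0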
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
dataset2.to_csv('data/coupon2_feature.csv',index=None)
#dataset1
dataset1['day_of_week'] = dataset1.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset1['day_of_month'] = dataset1.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset1['days_distance'] = dataset1.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,4,13)).days)
dataset1['discount_man'] = dataset1.discount_rate.apply(get_discount_man)
dataset1['discount_jian'] = dataset1.discount_rate.apply(get_discount_jian)
dataset1['is_man_jian'] = dataset1.discount_rate.apply(is_man_jian)
dataset1['discount_rate'] = dataset1.discount_rate.apply(calc_discount_rate)
d = dataset1[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset1 = pd.merge(dataset1,d,on='coupon_id',how='left')
dataset1.to_csv('data/coupon1_feature.csv',index=None)
############# merchant related feature #############
"""
1.merchant related:
total_sales. sales_use_coupon. total_coupon
coupon_rate = sales_use_coupon/total_sales.
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
"""
#for dataset3
merchant3 = feature3[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant3[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant3[merchant3.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant3[merchant3.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant3_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t2,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t3,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t5,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t6,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t7,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t8,on='merchant_id',how='left')
merchant3_feature.sales_use_coupon = merchant3_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature['merchant_coupon_transfer_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_coupon
merchant3_feature['coupon_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_sales
merchant3_feature.total_coupon = merchant3_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature.to_csv('data/merchant3_feature.csv',index=None)
#for dataset2
merchant2 = feature2[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant2[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant2[merchant2.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant2[merchant2.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant2_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t2,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t3,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t5,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t6,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t7,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t8,on='merchant_id',how='left')
merchant2_feature.sales_use_coupon = merchant2_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature['merchant_coupon_transfer_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_coupon
merchant2_feature['coupon_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_sales
merchant2_feature.total_coupon = merchant2_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature.to_csv('data/merchant2_feature.csv',index=None)
#for dataset1
merchant1 = feature1[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant1[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant1[merchant1.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant1[merchant1.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant1_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t2,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t3,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t5,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t6,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t7,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t8,on='merchant_id',how='left')
merchant1_feature.sales_use_coupon = merchant1_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature['merchant_coupon_transfer_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_coupon
merchant1_feature['coupon_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_sales
merchant1_feature.total_coupon = merchant1_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature.to_csv('data/merchant1_feature.csv',index=None)
############# user related feature #############
"""
3.user related:
count_merchant.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
buy_use_coupon/buy_total
user_date_datereceived_gap
"""
def get_user_date_datereceived_gap(s):
s = s.split(':')
return (date(int(s[0][0:4]),int(s[0][4:6]),int(s[0][6:8])) - date(int(s[1][0:4]),int(s[1][4:6]),int(s[1][6:8]))).days
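# e.g. get_user_date_datereceived_gap('20160528:20160515') -> 13
# (days between the consumption date and the coupon receipt date)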
#for dataset3
user3 = feature3[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user3[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user3[user3.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user3[user3.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user3[user3.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user3[(user3.date_received!='null')&(user3.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user3_feature = pd.merge(t,t1,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t3,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t4,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t5,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t6,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t7,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t8,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t9,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t11,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t12,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t13,on='user_id',how='left')
user3_feature.count_merchant = user3_feature.count_merchant.replace(np.nan,0)
user3_feature.buy_use_coupon = user3_feature.buy_use_coupon.replace(np.nan,0)
user3_feature['buy_use_coupon_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.buy_total.astype('float')
user3_feature['user_coupon_transfer_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.coupon_received.astype('float')
user3_feature.buy_total = user3_feature.buy_total.replace(np.nan,0)
user3_feature.coupon_received = user3_feature.coupon_received.replace(np.nan,0)
user3_feature.to_csv('data/user3_feature.csv',index=None)
#for dataset2
user2 = feature2[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user2[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user2[user2.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user2[user2.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user2[user2.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user2[(user2.date_received!='null')&(user2.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user2_feature = pd.merge(t,t1,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t3,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t4,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t5,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t6,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t7,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t8,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t9,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t11,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t12,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t13,on='user_id',how='left')
user2_feature.count_merchant = user2_feature.count_merchant.replace(np.nan,0)
user2_feature.buy_use_coupon = user2_feature.buy_use_coupon.replace(np.nan,0)
user2_feature['buy_use_coupon_rate'] = user2_feature.buy_use_coupon.astype('float') / user2_feature.buy_total.astype('float')
user2_feature['user_coupon_transfer_rate'] = user2_feature.buy_use_coupon.astype('float') / user2_feature.coupon_received.astype('float')
user2_feature.buy_total = user2_feature.buy_total.replace(np.nan,0)
user2_feature.coupon_received = user2_feature.coupon_received.replace(np.nan,0)
user2_feature.to_csv('data/user2_feature.csv',index=None)
#for dataset1
user1 = feature1[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user1[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user1[user1.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user1[(user1.date!='null')&(user1.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user1[(user1.date!='null')&(user1.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user1[user1.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user1[user1.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user1[(user1.date_received!='null')&(user1.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user1_feature = pd.merge(t,t1,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t3,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t4,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t5,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t6,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t7,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t8,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t9,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t11,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t12,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t13,on='user_id',how='left')
user1_feature.count_merchant = user1_feature.count_merchant.replace(np.nan,0)
user1_feature.buy_use_coupon = user1_feature.buy_use_coupon.replace(np.nan,0)
user1_feature['buy_use_coupon_rate'] = user1_feature.buy_use_coupon.astype('float') / user1_feature.buy_total.astype('float')
user1_feature['user_coupon_transfer_rate'] = user1_feature.buy_use_coupon.astype('float') / user1_feature.coupon_received.astype('float')
user1_feature.buy_total = user1_feature.buy_total.replace(np.nan,0)
user1_feature.coupon_received = user1_feature.coupon_received.replace(np.nan,0)
user1_feature.to_csv('data/user1_feature.csv',index=None)
################## user_merchant related feature #########################
"""
4.user_merchant:
times_user_buy_merchant_before.
"""
#for dataset3
all_user_merchant = feature3[['user_id','merchant_id']]
all_user_merchant.drop_duplicates(inplace=True)
t = feature3[['user_id','merchant_id','date']]
t = t[t.date!='null'][['user_id','merchant_id']]
t['user_merchant_buy_total'] = 1
t = t.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t.drop_duplicates(inplace=True)
t1 = feature3[['user_id','merchant_id','coupon_id']]
t1 = t1[t1.coupon_id!='null'][['user_id','merchant_id']]
t1['user_merchant_received'] = 1
t1 = t1.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t1.drop_duplicates(inplace=True)
t2 = feature3[['user_id','merchant_id','date','date_received']]
t2 = t2[(t2.date!='null')&(t2.date_received!='null')][['user_id','merchant_id']]
t2['user_merchant_buy_use_coupon'] = 1
t2 = t2.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t2.drop_duplicates(inplace=True)
t3 = feature3[['user_id','merchant_id']]
t3['user_merchant_any'] = 1
t3 = t3.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t3.drop_duplicates(inplace=True)
t4 = feature3[['user_id','merchant_id','date','coupon_id']]
t4 = t4[(t4.date!='null')&(t4.coupon_id=='null')][['user_id','merchant_id']]
t4['user_merchant_buy_common'] = 1
t4 = t4.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t4.drop_duplicates(inplace=True)
user_merchant3 = pd.merge(all_user_merchant,t,on=['user_id','merchant_id'],how='left')
user_merchant3 = pd.merge(user_merchant3,t1,on=['user_id','merchant_id'],how='left')
user_merchant3 = pd.merge(user_merchant3,t2,on=['user_id','merchant_id'],how='left')
user_merchant3 = pd.merge(user_merchant3,t3,on=['user_id','merchant_id'],how='left')
user_merchant3 = pd.merge(user_merchant3,t4,on=['user_id','merchant_id'],how='left')
user_merchant3.user_merchant_buy_use_coupon = user_merchant3.user_merchant_buy_use_coupon.replace(np.nan,0)
user_merchant3.user_merchant_buy_common = user_merchant3.user_merchant_buy_common.replace(np.nan,0)
user_merchant3['user_merchant_coupon_transfer_rate'] = user_merchant3.user_merchant_buy_use_coupon.astype('float') / user_merchant3.user_merchant_received.astype('float')
user_merchant3['user_merchant_coupon_buy_rate'] = user_merchant3.user_merchant_buy_use_coupon.astype('float') / user_merchant3.user_merchant_buy_total.astype('float')
user_merchant3['user_merchant_rate'] = user_merchant3.user_merchant_buy_total.astype('float') / user_merchant3.user_merchant_any.astype('float')
user_merchant3['user_merchant_common_buy_rate'] = user_merchant3.user_merchant_buy_common.astype('float') / user_merchant3.user_merchant_buy_total.astype('float')
user_merchant3.to_csv('data/user_merchant3.csv',index=None)
#for dataset2
all_user_merchant = feature2[['user_id','merchant_id']]
all_user_merchant.drop_duplicates(inplace=True)
t = feature2[['user_id','merchant_id','date']]
t = t[t.date!='null'][['user_id','merchant_id']]
t['user_merchant_buy_total'] = 1
t = t.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t.drop_duplicates(inplace=True)
t1 = feature2[['user_id','merchant_id','coupon_id']]
t1 = t1[t1.coupon_id!='null'][['user_id','merchant_id']]
t1['user_merchant_received'] = 1
t1 = t1.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t1.drop_duplicates(inplace=True)
t2 = feature2[['user_id','merchant_id','date','date_received']]
t2 = t2[(t2.date!='null')&(t2.date_received!='null')][['user_id','merchant_id']]
t2['user_merchant_buy_use_coupon'] = 1
t2 = t2.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t2.drop_duplicates(inplace=True)
t3 = feature2[['user_id','merchant_id']]
t3['user_merchant_any'] = 1
t3 = t3.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t3.drop_duplicates(inplace=True)
t4 = feature2[['user_id','merchant_id','date','coupon_id']]
t4 = t4[(t4.date!='null')&(t4.coupon_id=='null')][['user_id','merchant_id']]
t4['user_merchant_buy_common'] = 1
t4 = t4.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t4.drop_duplicates(inplace=True)
user_merchant2 = pd.merge(all_user_merchant,t,on=['user_id','merchant_id'],how='left')
user_merchant2 = pd.merge(user_merchant2,t1,on=['user_id','merchant_id'],how='left')
from typing import List
import sys
import psycopg2 as pg2 # Preferred cursor connection
from sqlalchemy import create_engine # preferred for pushing back to DB
import yaml
import pandas as pd
import numpy as np
_Loader_Registry = {}
def add_loader(name):
    """
    Register a loader function in _Loader_Registry under `name`.
    Any function annotated with add_loader must implement support for
    database cursor at the key 'cur'.
    """
    def _add_loader(fn):
        _Loader_Registry[name] = fn
        return fn
    return _add_loader
def get_loader(name):
return _Loader_Registry[name]
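# Usage sketch of the loader registry (hypothetical driver code; assumes an open
# database cursor `cur`). Most registered loaders take only the cursor, while the
# "snapshot" loader additionally expects a list of column names first:
#
#   marks_loader = get_loader("marks")
#   marks_df = marks_loader(cur)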
def connect_cursor(USERPATH):
"""Connect a cursor to database.
Input: Path to authorized user "secrets.yaml" file that is
expected to include connection parameters
Output: Database cursor object
Future consideration:
enable user to input latest year of available data instead
of hardcoding (e.g., 2011 current cutoff based on 2015 latest data)
"""
# Prepare secrets file to connect to DB
with open(USERPATH, 'r') as f:
# loads contents of secrets.yaml into a python dictionary
secret_config = yaml.safe_load(f.read())
# Set database connection to `conn`
db_params = secret_config['db']
conn = pg2.connect(host=db_params['host'],
port=db_params['port'],
dbname=db_params['dbname'],
user=db_params['user'],
password=db_params['password'])
# Connect cursor with psycopg2 database connection
cur = conn.cursor()
return cur
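# Expected layout of secrets.yaml, inferred from the keys read above
# (values below are placeholders):
#
#   db:
#     host: <hostname>
#     port: <port>
#     dbname: <database name>
#     user: <username>
#     password: <password>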
def get_graduation_info(cur) -> pd.DataFrame:
"""Fetch relevant student_lookups as rows with columns indicating known and
unknown withdraw reasons (i.e., year graduated, year dropped out, year
transferred, year withdrew)
Input: Database cursor
Output: Pandas DataFrame of student_lookups (IDs) and their
known withdraw outcomes based on raw database data
Future consideration:
enable user to input latest year of available data instead
of hardcoding (e.g., 2013 current cutoff based on 2015 latest data
and expected cohort of incoming 10th grade students + overall 4-yr on-time graduation)
"""
# SQL SELECT statement to gather all students that entered 9th grade
# prior to 2011, the latest year for which we could feasibly see
# graduation outcomes.
# Along with unique entrants, get their noted withdraw status
# which covers graduation, dropout, transfers and withdraws.
cur.execute('''
select *
from (
SELECT *, ROW_NUMBER() OVER
(PARTITION BY student_lookup
ORDER BY student_lookup) AS rnum
FROM sketch.hs_withdraw_info_2 hwi) t
where t.rnum = 1
and t.entry_year >= 2007 and t.entry_year <= 2013
''')
# Use cursor to fetch all rows into a list
rows = cur.fetchall()
# Build dataframe from rows
df = pd.DataFrame(rows, columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
# Additional step to drop the students with no 9th grade history
s = no_ninth_grade(cur)
drop_set = list(set(df['student_lookup']).intersection(set(s)))
df = df[~(df['student_lookup'].isin(drop_set))]
return df
def no_ninth_grade(cur):
"""Fetch student_lookups for students that entered 10th grade but
for which we have no 9th grade data.
Input: Database cursor
Output: Pandas DataFrame of student_lookups (IDs)
"""
cur.execute('''
select distinct student_lookup from sketch.hs_withdraw_info_2 hwi
where student_lookup not in
(select distinct student_lookup from clean.all_snapshots where grade = 9)
and hwi.entry_year between 2007 and 2013;
''')
rows = cur.fetchall()
df = pd.DataFrame(rows, columns=[name[0] for name in cur.description])
s = df['student_lookup'].astype('int')
return s
def to_sql(USERPATH, df):
"""Function will push processed DataFrame to the database.
Input: Path to secrets file to connect to db and dataframe
Output: None
Future work: Generalize
"""
# Prepare secrets file to connect to DB
with open(USERPATH, 'r') as f:
# loads contents of secrets.yaml into a python dictionary
secret_config = yaml.safe_load(f.read())
# Set connection to `engine` for uploading table
db_params = secret_config['db']
engine = create_engine(
'postgres://{user}:{password}@{host}:{port}/{dbname}'.format(
host=db_params['host'],
port=db_params['port'],
dbname=db_params['dbname'],
user=db_params['user'],
password=db_params['password']))
# Drop table if exists since it will be replaced
engine.execute('''drop table if exists sketch.ninth_grade_entries''')
engine.execute("COMMIT")
# Create the table in sketch as 'ninth_grade_entries'
create_sql = '''CREATE TABLE sketch.ninth_grade_entries ('''
# This loop concatenates column names with SQL VARCHAR datatype
# and appends it to the `create_sql' string. It calls all columns
# into VARCHAR as default so it could handle columns with text
# and/or number data.
for col in df.columns:
create_sql += col.strip("'") + ' varchar,'
# Execute create query and commit table to database.
engine.execute(create_sql.rstrip(',') + ');')
engine.execute("COMMIT")
df.pg_copy_to('ninth_grade_entries',
engine,
schema='sketch',
index=False,
if_exists='replace')
@add_loader("snapshot")
def get_snapshot_features(col_names: List[str], cur):
"""Function to get features from all_snapshots.
    Input: Database cursor object and selected columns from
clean.all_snapshots table in schools database
Output: DataFrame
"""
    # Build comma-separated column lists for the outer (t-aliased) and inner queries
    tcol_str = ', '.join('t.' + col for col in col_names)
    col_str = ', '.join(col_names)
qry = '''select distinct t.student_lookup, ''' + tcol_str + ''' from (select distinct student_lookup, school_year, ROW_NUMBER() OVER (PARTITION BY student_lookup) as rnum, ''' + col_str + ''' from clean.all_snapshots where grade = 9 order by student_lookup, school_year) t where t.rnum=1;'''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
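# Hypothetical call, using columns that the snapshot table is queried for elsewhere
# in this module:
#   snapshot_df = get_loader("snapshot")(["gifted", "limited_english"], cur)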
@add_loader("disability_and_intervention")
def get_disadvantage_features(cur):
qry = ''' select * from sketch.disadv_and_intervention;'''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df = df.astype('int')
return df
@add_loader("marks")
def get_course_mark_features(cur):
qry = ''' select * from sketch.marks_by_subject; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# Letter and numeric are duplicative in a way, so drop 1
df = df[df.columns[~df.columns.str.contains('letter')]]
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("absence_and_suspension")
def get_absence_features(cur):
qry = ''' select * from sketch.absences; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
df = df.replace(np.nan, 0)
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("absence_discipline")
def get_absence_discipline_features(cur):
qry = ''' select * from sketch.absence_discipline; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
df = df.replace(np.nan, 0)
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("std_test")
def get_test_score_features(cur):
qry = ''' select * from sketch.std_test; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# We don't have anything going back so far, so keep it 7-9 grades
df = df[df.columns[~df.columns.str.contains('ogt')]]
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("snapshot_non_mark")
def get_snapshot_non_mark_features(cur):
qry = '''select student_lookup, ethnicity, gifted, limited_english
from clean.all_snapshots a where a.grade = 9'''
cur.execute(qry)
rows = cur.fetchall()
data = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
df = data.drop_duplicates(subset=['student_lookup'], keep='first')
df['limited_english_cate'] = np.where(
df['limited_english'].str.contains('N'), 1, 0)
df['gifted_c'] = np.where(df['gifted'].str.contains('N'), 1, 0)
df = pd.get_dummies(df, columns=['ethnicity'], prefix=['ethnicity'])
df = df.drop(['gifted', 'limited_english'], axis=1)
#cur.execute(qry)
#rows = cur.fetchall()
#df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
# columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("repeat_grade_count")
def repeat_grade_count(cur):
qry = '''select distinct student_lookup, grade, school_year
from clean.all_grades a'''
cur.execute(qry)
rows = cur.fetchall()
data = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
data = data[data.duplicated(subset=['student_lookup', 'grade'],
keep=False)]
df = data.groupby(['student_lookup', 'grade']).school_year.count()
df = pd.DataFrame(df)
df.reset_index(inplace=True)
df2 = pd.DataFrame(df.groupby(['student_lookup']).grade.count())
df2.reset_index(inplace=True)
df2.columns = ['student_lookup', 'repeat_grade_count']
df2['student_lookup'] = df2['student_lookup'].astype('int')
return df2
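# Example: a student whose all_grades history shows grade 3 in two school years and
# grade 9 in two school years ends up with repeat_grade_count = 2 (two repeated grades).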
@add_loader("grade_9_gpa")
def grade_9_gpa(cur):
qry = ''' select student_lookup, gpa_9, school_gpa_9_rank, school_gpa_9_decile
from sketch.grade_9_gpa; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("demographics")
def get_demographic_features(cur):
qry = ''' select * from sketch.demographics; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("demographics_grade_10")
def get_demographic_grade_10_features(cur):
qry = ''' select * from sketch.demographics_grade_10; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
@add_loader("school_district")
def get_school_features(cur):
qry = ''' select * from sketch.school_district; '''
cur.execute(qry)
rows = cur.fetchall()
df = pd.DataFrame([[int(row[0])] + list(row)[1:] for row in rows],
columns=[name[0] for name in cur.description])
# We don't have anything going back so far, so keep it 7-9 grades
df = df[df.columns[~df.columns.str.contains('grade_6')]]
# Make sure student_id is an int
df['student_lookup'] = df['student_lookup'].astype('int')
return df
## Deprecated
def deduplicate_columns(df):
"""Function will deduplicate instances of student_lookup. Most arise from
students with multiple transfer years, and so this will focus on 1) purging
students who transferred out of the area and 2) maintaining only one row
for within-area transfers and capturing a single graduated/not graduated
outcome.
Output a cleaned dataframe with unique student_lookups now as index
"""
pass
def impute_labels(df):
"""Function will complete label creation by imputing withdraw reason for
all students that lack such outcomes."""
pass
def aggregate_features(cur, df):
"""Function to add aggregate features to student_lookups.
    Input: Database cursor object and relevant students dataframe
Output: Merged dataframe linking aggregate features to student dataframe
Future work: generalize feature aggregation and linking
"""
# Get aggregate absence feature
absence_query = '''
select student_lookup, avg(days_absent) as avg_abs, avg(days_absent_unexcused) as avg_abs_unex
from clean.all_snapshots
where grade <9
group by student_lookup;
'''
cur.execute(absence_query)
rows = cur.fetchall()
absence_df = pd.DataFrame(rows,
columns=[name[0] for name in cur.description])
absence_df = absence_df.astype({'student_lookup': 'Int32'})
# Get average grade feature
grade_query = '''
select distinct student_lookup, ogt_socstudies_ss, ogt_science_ss, ogt_write_ss, ogt_math_ss, ogt_read_ss, eighth_socstudies_ss, eighth_science_ss, eighth_math_ss, eighth_read_ss
from clean.oaaogt;
'''
cur.execute(grade_query)
rows = cur.fetchall()
grade_df = pd.DataFrame(rows,
columns=[name[0] for name in cur.description])
# Function to purge DNS [did not sit] and other non-numeric outcomes for now
f = lambda x: None if x in ['DNA', 'INV', 'DNS'] else x
for col in grade_df.columns:
grade_df[col] = grade_df[col].apply(f)
# Change dtypes to float and get averages for OGT tests and Eighth grade marks
grade_df = grade_df.astype('float')
grade_df['ogt_avg'] = grade_df[[
'ogt_socstudies_ss', 'ogt_science_ss', 'ogt_write_ss', 'ogt_math_ss',
'ogt_read_ss'
]].mean(axis=1)
grade_df['grade_8_avg'] = grade_df[[
'eighth_socstudies_ss', 'eighth_science_ss', 'eighth_math_ss',
'eighth_read_ss'
]].mean(axis=1)
grade_df = grade_df[['student_lookup', 'ogt_avg',
'grade_8_avg']].astype({'student_lookup': 'Int32'})
# Merge features to relevant students
feature_df = pd.merge(absence_df, grade_df, on='student_lookup')
    df = pd.merge(df, feature_df, how='left', on='student_lookup')
from datetime import datetime as dt
import pandas as pd
from env.Composer import Composer
from env.EBayEnv import EBayEnv
from env.generate.Recorder import OutcomeRecorder
from env.Player import SimulatedSeller, SimulatedBuyer
from env.LstgLoader import ChunkLoader
from env.QueryStrategy import DefaultQueryStrategy
from utils import load_chunk
from featnames import LSTG, SIM
class Generator:
def __init__(self, verbose=False, test=False):
"""
Constructor
:param bool verbose: if True, print info about simulator activity
:param bool test: if True, does not advance listing
"""
self.verbose = verbose
self.initialized = False
self.test = test
# model interfaces and input composer
self.recorder = None
self.composer = None
self.loader = None
self.query_strategy = None
self.env = None
def process_chunk(self, part=None, chunk=None, num_sims=1):
self.loader = self.load_chunk(part=part, chunk=chunk, num_sims=num_sims)
if not self.initialized:
self.initialize()
self.env = self.generate_env()
return self.generate()
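    # Hypothetical driver code (part/chunk values are placeholders; OutcomeGenerator
    # is the concrete subclass defined further below):
    #   gen = OutcomeGenerator(verbose=True)
    #   outcomes = gen.process_chunk(part='train', chunk=0, num_sims=10)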
def initialize(self):
self.composer = self.generate_composer()
self.query_strategy = self.generate_query_strategy()
self.recorder = self.generate_recorder()
self.initialized = True
def generate_recorder(self):
return None
def generate_composer(self):
return Composer()
def generate(self):
raise NotImplementedError()
def simulate_lstg(self):
"""
Simulates listing once.
:return: None
"""
self.env.reset()
self.env.run()
@property
def env_class(self):
return EBayEnv
@property
def env_args(self):
return dict(query_strategy=self.query_strategy,
loader=self.loader,
recorder=self.recorder,
verbose=self.verbose,
composer=self.composer,
test=self.test)
def generate_env(self):
return self.env_class(**self.env_args)
@staticmethod
def generate_buyer():
return SimulatedBuyer(full=True)
@staticmethod
def generate_seller():
return SimulatedSeller(full=True)
def generate_query_strategy(self):
buyer = self.generate_buyer()
seller = self.generate_seller()
return DefaultQueryStrategy(buyer=buyer, seller=seller)
def load_chunk(self, part=None, chunk=None, num_sims=None):
chunk = load_chunk(part=part, num=chunk)
return ChunkLoader(num_sims=num_sims, **chunk)
class OutcomeGenerator(Generator):
def generate(self):
"""
Simulates all lstgs in chunk according to experiment parameters
"""
print('Total listings: {}'.format(len(self.loader)))
t0 = dt.now()
while self.env.has_next_lstg():
self.env.next_lstg()
self.simulate_lstg()
# time elapsed
print('Avg time per listing: {} seconds'.format(
(dt.now() - t0).total_seconds() / len(self.loader)))
# return a dictionary
return self.recorder.construct_output()
def generate_recorder(self):
return OutcomeRecorder(verbose=self.verbose)
class ValueGenerator(Generator):
def generate(self):
print('Total listings: {}'.format(len(self.loader)))
t0 = dt.now()
rows = []
while self.env.has_next_lstg():
lstg, sim = self.env.next_lstg()
self.simulate_lstg()
rows.append([lstg, sim, self.env.outcome.price])
# time elapsed
print('Avg time per listing: {} seconds'.format(
(dt.now() - t0).total_seconds() / len(self.loader)))
# return series of sale prices
        df = pd.DataFrame.from_records(rows, columns=[LSTG, SIM, 'sale_price'])
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: 东方财富网 (Eastmoney) - quote homepage - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    东方财富网 (Eastmoney) - Shanghai/Shenzhen/Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    东方财富网 (Eastmoney) - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    东方财富 (Eastmoney) - mapping from stock code to market id
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: dict mapping stock code to market id
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
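# The returned dict maps a stock code to its Eastmoney market id: 1 for Shanghai
# listings, 0 for Shenzhen and Beijing listings, e.g. code_id_map_em().get("600000")
# would be expected to return 1 (illustrative).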
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    东方财富网 (Eastmoney) - quote homepage - Shanghai/Shenzhen/Beijing A shares - daily bars
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": forward-adjusted (前复权), "hfq": backward-adjusted (后复权), "": unadjusted}
    :type adjust: str
    :return: daily bars
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
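# Example usage (illustrative; needs network access to the Eastmoney endpoint):
# df = stock_zh_a_hist(symbol="000001", period="daily",
#                      start_date="20210101", end_date="20210630", adjust="qfq")
# df.columns -> 日期, 开盘, 收盘, 最高, 最低, 成交量, 成交额, 振幅, 涨跌幅, 涨跌额, 换手率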
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
东方财富网-行情首页-沪深京 A 股-每日分时行情
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param adjust: choice of {'', 'qfq', 'hfq'}
:type adjust: str
:return: 每日分时行情
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.t | o_numeric(temp_df["振幅"]) | pandas.to_numeric |
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Age,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Timedelta
)
from woodwork.statistics_utils import (
_get_describe_dict,
_get_mode,
_make_categorical_for_mutual_info,
_replace_nans_for_mutual_info
)
from woodwork.tests.testing_utils import mi_between_cols, to_pandas
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def test_get_mode():
series_list = [
pd.Series([1, 2, 3, 4, 2, 2, 3]),
pd.Series(['a', 'b', 'b', 'c', 'b']),
pd.Series([3, 2, 3, 2]),
pd.Series([np.nan, np.nan, np.nan]),
pd.Series([pd.NA, pd.NA, pd.NA]),
| pd.Series([1, 2, np.nan, 2, np.nan, 3, 2]) | pandas.Series |
from flask import Flask, request, send_from_directory
from models import Mesurement, Clothes, Signs
from datetime import datetime
from Analyze import FindMean
import json
from dateutil.parser import parse
from babel import dates
import pandas
from bson import json_util
app = Flask(__name__)
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader(__name__, 'templates'))
def j_min(d_list):
return min(d_list)
def j_max(d_list):
    return max(d_list)
def format_datetime(value, format='medium'):
print(value)
value = parse(str(value))
if format == 'full':
format="EEEE, d. MMMM y 'at' HH:mm"
elif format == 'medium':
format="EE dd.MM.y HH:mm"
return dates.format_datetime(value, format, tzinfo=dates.get_timezone('Asia/Vladivostok'))
env.filters['datetime'] = format_datetime
env.filters['min'] = j_min
env.filters['max'] = j_max
app.config['STATIC_FOLDER'] = 'templates'
env.filters['jsonify'] = json.dumps
@app.route('/css/<path:path>')
def send_css(path):
return send_from_directory('templates/css', path)
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('templates/js', path)
@app.route('/img/<path:path>')
def send_img(path):
return send_from_directory('templates/img', path)
@app.route('/src/<path:path>')
def send_src(path):
return send_from_directory('templates/src', path)
@app.route('/team/<team_id>')
def send_MS(team_id):
b = Mesurement.query.raw_output()
b = b.filter(Mesurement.team == int(team_id)).all()
meteo_n = | pandas.DataFrame(b) | pandas.DataFrame |
# import alphautils as al
import pandas as pd
import math
from operator import itemgetter
def vol_sort(volume, order):
return {k: v for k, v in sorted(volume.items(),
key=itemgetter(order), reverse=True)}
def inner_vol_handle(args):
quote, df, index, bias, enum = args
dict_ = {}
for num, name in enumerate(enum):
temp = df.loc[index == num+bias]
dict_[name] = temp['Pips'].mean()
return dict_
def add_week_of_month(df):
df['Week'] = pd.to_numeric(df.index.day/7)
df['Week'] = df['Week'].apply(lambda x: math.ceil(x))
return df
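# Worked example (editor's sketch, not part of the original module): week-of-month is ceil(day / 7),
# so days 1-7 map to 1, days 8-14 to 2, and day 31 to 5, e.g.
# _demo = pd.DataFrame(index=pd.to_datetime(['2021-01-03', '2021-01-10', '2021-01-31']))
# add_week_of_month(_demo)['Week'].tolist()  # -> [1, 2, 5]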
def forexvolatility(pairs, timeframe='Daily', periods=10):
vols = {}
timeframe_vol = {}
WEEK_DAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri']
MONTH_WEEKS = ['1st', '2nd', '3rd', '4th', '5th']
YEAR_MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May',
'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
'Nov', 'Dec']
for quote in pairs:
df = pd.read_csv(f'investpy/currenciesdata/{quote}_{timeframe}.csv')
df = df.iloc[-periods:]
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
__all__ = ['BradleyTerry']
from typing import Tuple
import attr
import numpy as np
import pandas as pd
from . import annotations
from .annotations import Annotation, manage_docstring
from .base_aggregator import BaseAggregator
_EPS = np.float_power(10, -10)
@attr.s
@manage_docstring
class BradleyTerry(BaseAggregator):
"""
Bradley-Terry, the classic algorithm for aggregating pairwise comparisons.
<NAME>. 2004.
MM algorithms for generalized Bradley-Terry models
Ann. Statist., Vol. 32, 1 (2004): 384–406.
<NAME>. and <NAME>. 1952.
Rank analysis of incomplete block designs. I. The method of paired comparisons.
Biometrika, Vol. 39 (1952): 324–345.
"""
n_iter: int = attr.ib()
result_: annotations.LABEL_SCORES = attr.ib(init=False)
@manage_docstring
def fit(self, data: annotations.PAIRWISE_DATA) -> Annotation(type='BradleyTerry', title='self'):
M, unique_labels = self._build_win_matrix(data)
if not unique_labels.size:
self.result_ = pd.Series([], dtype=np.float64)
return self
T = M.T + M
active = T > 0
w = M.sum(axis=1)
Z = np.zeros_like(M, dtype=float)
p = np.ones(M.shape[0])
p_new = p.copy() / p.sum()
for _ in range(self.n_iter):
P = np.broadcast_to(p, M.shape)
Z[active] = T[active] / (P[active] + P.T[active])
p_new[:] = w
p_new /= Z.sum(axis=0)
p_new /= p_new.sum()
p[:] = p_new
self.result_ = | pd.Series(p_new, index=unique_labels) | pandas.Series |
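# Editor's note (not from the original source): the loop above is the standard MM update for the
# Bradley-Terry likelihood -- with T[i, j] comparisons between labels i and j and w[i] wins for
# label i, each iteration sets p_i proportional to w_i / sum_j( T[i, j] / (p_i + p_j) ) and then
# renormalises the scores to sum to one.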
#!/usr/bin/env python
# coding: utf-8
# In[47]:
import csv
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
# In[28]:
lines = []
with open('data/driving_log.csv', 'r') as f:
reader = csv.reader(f)
for line in reader:
lines.append(line)
# In[38]:
# only for visualization
col_names = ['center', 'left', 'right',
'steering', 'throttle', 'brake', 'speed']
data_reader = | pd.DataFrame(lines, columns=col_names) | pandas.DataFrame |
import re
import requests
import sys
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from pdb import set_trace as pb
max_fallback = 2
class Currency:
def __init__(self):
self.data = {}
self.data_hist = {}
def get(self, currency_pair):
'''
Parameters
----------
currency_pair : str
Returns
-------
dictionary of the currency pair
'''
if currency_pair not in self.data:
curr = get_historical_currency(currency_pair)
self.data[currency_pair] = curr.T.to_dict()[curr.index[0]]
return self.data[currency_pair]
def get_hist(self, currency_pair, dates):
if currency_pair not in self.data_hist:
self.data_hist[currency_pair] = get_historical_currency(currency_pair, dates)
return self.data_hist[currency_pair]
def fill(self):
'''
        Fill the cached data for every available cross pair
        '''
        if self.data == {}: self.get('USD')
        i = list(self.data.keys())[0]  # dict keys are not indexable in Python 3 without list()
for k in self.data[i].keys():
self.get(k)
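# Illustrative usage of the cache above (editor's sketch; relies on get_historical_currency below):
# fx = Currency()
# usd = fx.get('USD')               # fetched once, then served from the cached cross-rate dict
# hist = fx.get_hist('USD', dates)  # cached historical rates for the given dates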
def get_historical_currency(base, date= | pd.datetime.today() | pandas.datetime.today |
import codecademylib3_seaborn
from bs4 import BeautifulSoup
import requests
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
print("some")
webpage_response = requests.get("https://s3.amazonaws.com/codecademy-content/courses/beautifulsoup/cacao/index.html")
webpage = webpage_response.content
soup=BeautifulSoup(webpage,"html.parser")
ratings = []
rating = soup.find_all(attrs={"class":"Rating"})
for rate in rating[1:]:
ratings.append(float(rate.get_text()))
print(ratings)
plt.hist(ratings)
plt.show()
companies = soup.select(".Company")
all_company = []
for company in companies[1:]:
all_company.append(company.get_text())
print(all_company)
data = {"Company":all_company, "Rating":ratings}
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date: 2021/03/11 Thu
# @Author: ShayXU
# @Filename: predict.py
"""
0、重构下代码,优化结构
0.1 *pandas rolling聚合 是否有信息泄露 (大概不会吧)
1、结果不准确 =》 再加特征,多维CNN
# volume 成交量
# amount 成交额
# turn 换手率
2、结果不稳定 =》 多次训练。
3、使用tensorboard进行超参优化
4、模型集成
5、模型量化
"""
import os
import datetime
from tqdm import tqdm
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import baostock as bs # 股票宝,获取股票数据
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Flatten,Reshape,Dropout,Activation
from tensorflow.keras.layers import Conv2D,MaxPooling2D
from tensorflow.keras.layers import Conv1D,MaxPooling1D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import EarlyStopping
def download():
    # Download stock data from baostock
bs.login()
for stock_code in tqdm(stock_code_list):
stock_info_path = "stock_info/" + stock_code + ".csv"
if not os.path.exists(stock_info_path) or re_download:
rs = bs.query_history_k_data(stock_code, "date, open, close, volume, amount, turn, pctChg", start_date=start_date, end_date=to_date, frequency="d", adjustflag="3")
            # volume: trading volume
            # amount: trading amount
            # turn: turnover rate
data_list = []
while (rs.error_code == '0') & rs.next(): # 获取一条记录,将记录合并在一起
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
result.to_csv(stock_info_path, index=False)
bs.logout()
def preprocess():
stock_info_path = "stock_info/" + stock_code + ".csv" # 文件路径
# 读取csv文件
stock = pd.read_csv(stock_info_path, parse_dates=['date'])
if i == 0:
pass
else:
stock = stock[:-i]
    # Prepare the data
    stock['close_nomalized'] = (stock['close']-stock['close'].min())/(stock['close'].max()-stock['close'].min())  # normalized close price
    stock['volume_nomalized'] = (stock['volume']-stock['volume'].min())/(stock['volume'].max()-stock['volume'].min())  # normalized volume
    stock['avg_price'] = stock['close'].rolling(predict_period).mean()  # mean price over the most recent period
    stock = stock[predict_period-1:]
    stock['future_price'] = stock['close'].rolling(predict_period).mean().shift(-predict_period)  # mean future price (excluding today's close)
    # stock = stock.dropna(axis=0)  # drop rows with NaN
def flat_or_not(x):
        if x >= threshold_flat:
            return 2  # rise
        elif x <= -threshold_flat:
            return 1  # fall
        else:
            return 0  # flat
stock['label'] = ((stock['future_price'] - stock['avg_price']) / stock['avg_price']).apply(flat_or_not)
n = len(stock)
if not cnn_3d_flag:
        x = np.array([stock['close_nomalized'][i:i+history_period] for i in range(n-history_period-predict_period+1)]).reshape(-1, 20, 20)  # input: 400-day close windows, (0:400) ~ (n-400-predict : n)
        x = x[:, :, :, np.newaxis]
    else:
        x = np.array([stock[['close_nomalized', 'volume_nomalized']][i:i+history_period] for i in range(n-history_period-predict_period+1)]).reshape(-1, 20, 20, 2)  # input: 400-day windows of close + volume
        x = x[:, :, :, :, np.newaxis]
    y = stock['label'][history_period-1:].values[:-predict_period]  # labels
    print(pd.DataFrame(y)[0].value_counts())  # print the number of samples in each of the three classes
return stock, x, y
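# Worked example of the labelling rule above (illustrative): with threshold_flat = 0.007, a relative
# change (future_price - avg_price) / avg_price of +0.010 is labelled 2 (rise), -0.010 is labelled
# 1 (fall), and +0.003 falls inside the dead band and is labelled 0 (flat).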
def train():
model = keras.models.Sequential()
    model.add(keras.layers.Conv2D(32, 3, activation='relu', input_shape=(20, 20, 1)))  # number of kernels => output dimension
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(3))
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# model = tf.keras.models.load_model('saved_model.h5')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=6, verbose=1, mode='auto')
model.fit(x_train, y_train, epochs=epoch, validation_data=(x_test, y_test), callbacks = [monitor])
# tf.saved_model.save(model, 'saved_model/')
model.save('saved_model.h5')
def train_3d():
model = keras.models.Sequential()
    model.add(keras.layers.Conv3D(32, (3, 3, 1), activation='relu', input_shape=(20, 20, 2, 1)))  # number of kernels => output dimension
model.add(keras.layers.MaxPool3D((2, 2, 1)))
model.add(keras.layers.Conv3D(64, (3, 3, 1), activation='relu'))
model.add(keras.layers.MaxPool3D((2, 2, 1)))
model.add(keras.layers.Conv3D(64, (3, 3, 1), activation='relu'))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(3))
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# model = tf.keras.models.load_model('saved_model.h5')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=6, verbose=1, mode='auto')
model.fit(x_train, y_train, epochs=epoch, validation_data=(x_test, y_test), callbacks = [monitor])
# tf.saved_model.save(model, 'saved_model/')
model.save('saved_model.h5')
def predict():
"""
preprocess中已经根据i,缩短了stock从而x,y都无需额外处理
"""
# 读取模型
model = tf.keras.models.load_model('saved_model.h5')
# model = tf.saved_model.load('saved_model/')
xi = tf.convert_to_tensor(x[[-1]], tf.float32, name='inputs')
predictions = model(xi)
score = tf.nn.softmax(predictions[0])
    class_names = {
        0: "持平",  # flat
        1: "跌",  # fall
        2: "涨"  # rise
    }
print("Price: {}".format(stock['close'].values[-1]))
print(
"Stock {} most likely {} with a {:.2f} percent confidence."
.format(stock_code, class_names[np.argmax(score)], 100 * np.max(score))
)
with open("predict_output.csv", 'a', newline='') as f:
tmp = list(stock.loc[-1])
for item in [class_names[np.argmax(score)], 100 * np.max(score)]:
tmp.append(item)
csv.writer(f).writerow(tmp)
return [stock['close'].values[-1], np.argmax(score)]
if __name__ == '__main__':
    # Clear the prediction output file
    with open("predict_output.csv", 'w') as f:
        pass
    # A few hyperparameters
    to_date = datetime.datetime.now().strftime("%Y-%m-%d")  # today's date
    re_download = False  # re-download the data
    re_train = False  # retrain the model
    predict_period = 6  # number of days to predict
    history_period = 400  # length of the history window (days)
    epoch = 200
    start_date = '2010-01-01'  # earliest data date
    threshold_flat = 0.007  # threshold below which the price is treated as flat
stock_code_list = | pd.read_csv('stock_codes.csv') | pandas.read_csv |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Testing hwm_allocation() with bookings in natural order.
import unittest
from imscommon.es.ims_esclient import ESClient
from pyspark.sql import HiveContext
from pyspark import SparkContext, SparkConf
import optimizer.util
import pandas
from pandas.testing import assert_frame_equal
import optimizer.algo.hwm
import os
import json
import warnings
class Unittest_HWM_Allocations_2(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
with open(fpath + '/data_source/bookings_fully_overlapped.json') as bookings_source:
self.bookings = json.load(bookings_source)
with open(fpath + '/data_source/cfg.json') as cfg_source:
self.cfg = json.load(cfg_source)
today = '20180402'
self.days = optimizer.util.get_days_from_bookings(today, self.bookings)
self.sc = SparkContext.getOrCreate()
self.hive_context = HiveContext(self.sc)
self.schema = optimizer.util.get_common_pyspark_schema()
def compare_two_dfs(self, pandas_df_expected, df_to_test_rows):
df = self.hive_context.createDataFrame(df_to_test_rows, self.schema)
df_allocated = optimizer.algo.hwm.hwm_allocation(df, self.bookings, self.days)
pandas_df_allocated = df_allocated.select("*").toPandas()
print(pandas_df_expected)
print(pandas_df_allocated)
return self.assertTrue(assert_frame_equal(pandas_df_expected, pandas_df_allocated, check_dtype=False) == None)
def test_hwm_allocation_case1(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b3', 'b2'], [], 733, {'b1': 500, 'b3': 233}]
df_to_test_rows = [(['20180402', ['b1', 'b3', 'b2'], [], {}, 733])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case2(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1'], ['b2', 'b3'], 6047, {'b1': 500}]
df_to_test_rows = [(['20180402', ['b1'], ['b2', 'b3'], {}, 6047])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case3(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b2'], ['b1', 'b3'], 1410, {'b2': 800}]
df_to_test_rows = [(['20180402', ['b2'], ['b1', 'b3'], {}, 1410])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case4(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b3'], ['b1', 'b2'], 12241, {'b3': 1000}]
df_to_test_rows = [(['20180402', ['b3'], ['b1', 'b2'], {}, 12241])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case5(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b2'], ['b3'], 3575, {'b1': 500, 'b2': 800}]
df_to_test_rows = [(['20180402', ['b1', 'b2'], ['b3'], {}, 3575])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case6(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b2', 'b3'], ['b1'], 1002, {'b3': 1000, 'b2': 2}]
df_to_test_rows = [(['20180402', ['b2', 'b3'], ['b1'], {}, 1002])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case7(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b3'], ['b2'], 11181, {'b1': 500, 'b3': 1000}]
df_to_test_rows = [(['20180402', ['b1', 'b3'], ['b2'], {}, 11181])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case8(self):
pandas_df_expected = | pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated']) | pandas.DataFrame |
#%%
from os import name
import sys
from matplotlib.pyplot import xticks, ylabel, ylim
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import pylab
from matplotlib import rcParams
matplotlib.rcParams['text.usetex'] = True
seeds = [x for x in range(2012, 2022)]
dataset = 'yahoo' #'sim4' or 'coat' or 'yahoo'
if len(sys.argv) == 2:
dataset = sys.argv[1]
lines = {}
line_colors = sns.color_palette()
line_colors[-1], line_colors[-4] = line_colors[-4], line_colors[-1]
line_styles = ['--', ':', '-.', '-', '-', '-', '-', '--', ':', '-.']
line_markers = ['o', 'o', 'o', 'o', 's', 'P', 'D', 's', 's', 's']
for i, mode in enumerate(['boi', 'pld', 'mlp', 'att', 'mlp_tanh', 'cnn', 'gru', 'mlp_relu', 'mlp_sigmoid']):
lines[mode] = [line_styles[i], line_markers[i], line_colors[i]]
def draw(path, folder='result/', n_points = None):
df = []
for seed in seeds:
file_name = folder + path % seed
df_ = | pd.read_csv(file_name, header=None, index_col=False, sep=' ') | pandas.read_csv |
import argparse
import csv
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import scipy.stats as st
import math
parser = argparse.ArgumentParser(description='Calculate Z-score for DIFFRAC experiments')
parser.add_argument('replicate_count', metavar = 'N', type=int,
help='number of replicates')
parser.add_argument('collapse_method', metavar = 'm', type=str,
help='method for collapsing PSM/fraction z-scores to protein z-scores. Either stouffer or max.')
parser.add_argument('control_elut_1', metavar = 'c', type=str,
help='control elut file path')
parser.add_argument('treat_elut_1', metavar = 't', type=str,
help='treatment elut file path')
parser.add_argument('-c2','--control_elut_2', type=str,
help='control elut file path for 2nd replicate',
)
parser.add_argument('-t2','--treat_elut_2', type=str,
help='treatment elut file path for 2nd replicate',
)
parser.add_argument('-c3','--control_elut_3', type=str,
help='control elut file path for 3rd replicate',
)
parser.add_argument('-t3','--treat_elut_3', type=str,
help='treatment elut file path for 3rd replicate',
)
args = parser.parse_args()
#make new dataframe with PSM/fraction format for all replicates
ctl_1 = pd.read_csv(args.control_elut_1, sep='\t')
treat_1 = pd.read_csv(args.treat_elut_1, sep='\t')
sample_dict = {'ctl_1_psms':ctl_1, 'treat_1_psms':treat_1}
sample_list =['ctl_1_psms','treat_1_psms']
ctl_1_df = pd.DataFrame()
treat_1_df = pd.DataFrame()
df_list =[ctl_1_df,treat_1_df]
if args.replicate_count >= 2:
ctl_2 = pd.read_csv(args.control_elut_2, sep='\t')
treat_2 = pd.read_csv(args.treat_elut_2, sep='\t')
sample_dict['ctl_2_psms'] = ctl_2
sample_dict['treat_2_psms']= treat_2
sample_list.append('ctl_2_psms')
sample_list.append('treat_2_psms')
ctl_2_df = pd.DataFrame()
treat_2_df = pd.DataFrame()
df_list.append(ctl_2_df)
df_list.append(treat_2_df)
if args.replicate_count == 3:
ctl_3 = | pd.read_csv(args.control_elut_3, sep='\t') | pandas.read_csv |
# gather
import pandas as pd
import io
import time
import zipfile
import zlib
import urllib.request
urllib.request.urlretrieve('http://geoportal1-ons.opendata.arcgis.com/datasets/48d0b49ff7ec4ad0a4f7f8188f6143e8_3.zip',
'constituencies_super_generalised_shapefile.zip')
with zipfile.ZipFile('constituencies_super_generalised_shapefile.zip', 'r') as zip_ref:
zip_ref.extractall('constituencies_super_generalised_shapefile')
petition_list = pd.read_csv(
'https://petition.parliament.uk/archived/petitions.csv?parliament=1&state=published')
url_list = petition_list['URL'].tolist()
count, start = 0, time.time()
signatures, mp, errors = [], [], []
for petition_url in url_list:
try:
response = pd.read_json(petition_url + '.json')
response = pd.DataFrame.from_dict(response.iloc[0, 0], orient='index')
created_at = response.loc['created_at', 0]
response = pd.DataFrame.from_dict(
response.loc['signatures_by_constituency', 0])
response['created'] = created_at
signatures.extend(
response[['ons_code', 'signature_count', 'created']].values.tolist())
mp.extend(
response[['ons_code', 'name', 'mp']].values.tolist())
except:
errors.append(petition_url)
count += 1
if count % 250 == 0:
print('{} files reached in {}s'.format(count, time.time() - start))
if len(errors) != 0:
print(errors)
signatures = pd.DataFrame(
signatures, columns=['ons_code', 'signature_count', 'date'])
signatures['date'] = pd.to_datetime(signatures['date'])
signatures = signatures.set_index('date').groupby(
[pd.TimeGrouper(freq='M'), 'ons_code']).sum().reset_index().sort_values(['ons_code', 'date'])
signatures['date'] = signatures.date.dt.to_period('M')
mp = pd.DataFrame(mp, columns=['ons_code', 'constituency', 'mp']).drop_duplicates(
'ons_code', keep='last')
mp = mp.replace('Ynys M?n', 'Ynys Mon')
population = pd.read_excel(
'http://data.parliament.uk/resources/constituencystatistics/Population-by-age.xlsx', 'Data')
population = population[['ONSConstID', 'RegionName', 'PopTotalConstNum']].rename(
columns={'ONSConstID': 'ons_code', 'RegionName': 'region', 'PopTotalConstNum': 'population'})
eu = pd.read_excel(
'https://secondreading.parliament.uk/wp-content/uploads/2017/02/eureferendum_constitunecy.xlsx', 'DATA')
eu.columns = ['ons_code', 'constituency', 'ch_leave_estimate',
'result_known', 'known_leave', 'leave_figure']
eu = eu.loc[:, ['ons_code', 'leave_figure']].iloc[7:]
eu['stay_figure'] = 1 - eu['leave_figure']
eu = eu.drop('leave_figure', axis=1)
hex = | pd.read_json(
'https://odileeds.org/projects/hexmaps/maps/constituencies.hexjson') | pandas.read_json |
from collections import defaultdict
import json
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import tenkit
from tenkit_tools.utils import load_best_group
from sklearn.metrics import roc_auc_score
import h5py
RUN_FOLDER = Path("201128_noise_05_20_30_40")
LOG_FOLDER = RUN_FOLDER/"results"
NUM_EXPERIMENTS = 50
MAX_ITS = 1000
TIME_NAME = 'time'
EXPERIMENT_NAMES = [
"double_split",
"single_split_Dk",
# "double_split_NOPF2"
"flexible_coupling"
]
def load_logs(log_folder: Path, experiment_num: int) -> dict:
logs = {}
for experiment_name in EXPERIMENT_NAMES:
with (log_folder/f"{experiment_name}_{experiment_num:03d}.json").open() as f:
logs[experiment_name] = json.load(f)
return logs
#with (log_folder/f"single_split_C_{experiment_num:03d}.json").open() as f:
# single_split_C_results = json.load(f)
#with (log_folder/f"single_split_Dk_{experiment_num:03d}.json").open() as f:
# single_split_Dk_results = json.load(f)
#with (log_folder/f"flexible_coupling_{experiment_num:03d}.json").open() as f:
# flexible_coupling_results = json.load(f)
#return double_split_results, single_split_Dk_results
#return double_split_results, single_split_C_results, single_split_Dk_results, flexible_coupling_results
def load_double_split_logs(log_folder: Path, experiment_num: int) -> dict:
with (log_folder/f"double_split_{experiment_num:03d}.json").open() as f:
double_split_results = json.load(f)
return double_split_results,
def load_checkpoint(log_folder: Path, experiment_name: str, experiment_num: int) -> tenkit.decomposition.EvolvingTensor:
checkpoint_folder = log_folder/"checkpoints"
EvolvingTensor = tenkit.decomposition.EvolvingTensor
with h5py.File(checkpoint_folder/f"{experiment_name}_{experiment_num:03d}.h5", "r") as h5:
group = load_best_group(h5)
estimated = EvolvingTensor.load_from_hdf5_group(group)
return estimated
def load_checkpoints(log_folder: Path, experiment_num: int) -> list:
return {experiment_name: load_checkpoint(log_folder, experiment_name, experiment_num)
for experiment_name in EXPERIMENT_NAMES}
def load_decomposition(log_folder: Path, experiment_num: int) -> tenkit.decomposition.EvolvingTensor:
checkpoint_folder = log_folder/"decompositions"
EvolvingTensor = tenkit.decomposition.EvolvingTensor
with h5py.File(checkpoint_folder/f"{experiment_num:03d}.h5", "r") as h5:
estimated = EvolvingTensor.load_from_hdf5_group(h5["evolving_tensor"])
return estimated
def compute_accuracies(log_folder: Path, progress=False) -> dict:
accuracies = {}
if progress:
range_ = trange
else:
range_ = range
for experiment_num in range_(NUM_EXPERIMENTS):
checkpoints = load_checkpoints(log_folder, experiment_num)
true = load_decomposition(log_folder, experiment_num)
for name, decomposition in checkpoints.items():
if name not in accuracies:
accuracies[name] = {
'Sensitivity': [],
'Specificity': [],
'Dice': [],
'ROC AUC': [],
}
# Binarize
B = np.array(decomposition.B)
B /= np.linalg.norm(B, axis=1, keepdims=True)
estimated_map = abs(B) > 1e-8
true_map = np.array(true.B) > 1e-8
# Compute metrics
accuracies[name]['Sensitivity'].append(np.sum(estimated_map*true_map) / np.sum(true_map))
accuracies[name]['Specificity'].append(np.sum((1 - estimated_map)*(1 - true_map)) / np.sum(1 - true_map))
accuracies[name]['Dice'].append(2*np.sum(estimated_map*true_map) / (np.sum(true_map) + np.sum(estimated_map)))
accuracies[name]['ROC AUC'].append(roc_auc_score(true_map.ravel().astype(int), B.ravel()))
return accuracies
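# Worked example of the metrics above (illustrative): if the true support map has 4 non-zero entries
# and the estimated map recovers 3 of them plus 1 spurious entry, then Sensitivity = 3/4 = 0.75 and
# Dice = 2*3 / (4 + 4) = 0.75, while Specificity is the fraction of truly-zero entries that the
# estimate also leaves at zero.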
def create_summaries(experiment_log: dict) -> dict:
"""Takes a single result dict as input and creates a summary.
Summary just contains the logs for the final iteration.
"""
summary = {}
for key, value in experiment_log.items():
summary[key] = value[-1]
return summary
def load_summaries(log_folder, num_experiments: int) -> (dict, dict):
"""Take number of experiments as input and return two dicts, one for logs and one for summaries.
The keys of these dicts are the experiment types (e.g. double_split) and the values are dictionaries of lists.
The keys of the inner dictionaries are log-types (e.g. fms) and the values are lists.
The i-th element of these lists are the logs and summaries for the i-th experiment.
"""
logs = {
experiment_name: defaultdict(list) for experiment_name in EXPERIMENT_NAMES
}
summaries = {
experiment_name: defaultdict(list) for experiment_name in EXPERIMENT_NAMES
}
for i in range(num_experiments):
for experiment_name, log in load_logs(log_folder, i).items():
for key, value in log.items():
logs[experiment_name][key].append(value)
for experiment_name, log in load_logs(log_folder, i).items():
summary = create_summaries(log)
for key, value in summary.items():
summaries[experiment_name][key].append(value)
logs = {key: dict(value) for key, value in logs.items()}
summaries = {key: dict(value) for key, value in summaries.items()}
return logs, summaries
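# Shape of the returned structures (illustrative; 'fms' is a hypothetical log key):
# logs['double_split']['fms'][i]      -> full per-iteration log of experiment i
# summaries['double_split']['fms'][i] -> the value at the final iteration of experiment i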
def load_double_split_summaries(log_folder, num_experiments: int) -> (dict, dict):
"""Take number of experiments as input and return two dicts, one for logs and one for summaries.
The keys of these dicts are the experiment types (e.g. double_split) and the values are dictionaries of lists.
The keys of the inner dictionaries are log-types (e.g. fms) and the values are lists.
The i-th element of these lists are the logs and summaries for the i-th experiment.
"""
experiment_names = (
'double_split',
)
logs = {
experiment_name: defaultdict(list) for experiment_name in experiment_names
}
summaries = {
experiment_name: defaultdict(list) for experiment_name in experiment_names
}
for i in range(num_experiments):
for experiment_name, log in zip(experiment_names, load_double_split_logs(log_folder, i)):
for key, value in log.items():
logs[experiment_name][key].append(value)
for experiment_name, log in zip(experiment_names, load_double_split_logs(log_folder, i)):
summary = create_summaries(log)
for key, value in summary.items():
summaries[experiment_name][key].append(value)
return logs, summaries
def make_log_array(log_list: list) -> np.array:
"""Takes uneven list of logs and creates a 2D numpy array where the last element of each list is used as padding.
"""
log_array = np.zeros((NUM_EXPERIMENTS, MAX_ITS))
for i, log in enumerate(log_list):
num_its = len(log)
log_array[i, :num_its] = log
log_array[i, num_its:] = log[-1]
return log_array
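# Worked example (illustrative): a run that stopped after 3 iterations with log [0.9, 0.5, 0.4]
# becomes a row of length MAX_ITS padded with its final value, [0.9, 0.5, 0.4, 0.4, ..., 0.4],
# so all runs can be stacked into one (NUM_EXPERIMENTS, MAX_ITS) array.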
def make_summary_df(summaries):
"""Convert nested dictionary of summaries (inner dicts represent summaries for one method) into a single dataframe.
"""
summary_dfs = {method: pd.DataFrame(summary) for method, summary in summaries.items()}
for method, summary_df in summary_dfs.items():
summary_df['Method'] = method
summary_df["Dataset num"] = summary_df.index
for method in summary_dfs:
summary_dfs[method] = summary_dfs[method].set_index(["Method", "Dataset num"])
return | pd.concat(summary_dfs) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import rampy
import warnings
import csv
PLOT = False
RED_LABEL = 'CY3(1)'
GREEN_LABEL = 'FITC(1)'
PINK_LABEL = 'CY5(1)'
COLUMN_TITLES = ["Index", "Peak Ratio", "Green Test", "Red Test", "Pink Background", "Pink Max"]
# filename = 'data/RDART004baseline_testcase.xlsx'
filename = 'data/RDART004baseline.xlsx'
data = | pd.ExcelFile(filename) | pandas.ExcelFile |
import calendar
import pandas as pd
from colourutils import extend_colour_map
def extend_data_range(data):
"""
Extends the index of the given Series so that it has daily values, starting from the 1st of the earliest month and
ending on the last day of the latest month.
:param data: The Series to be extended with a datetime index
:return: The Series with an extended daily index
"""
earliest_date = data.index.min()
first_month_start = pd.Timestamp(year=earliest_date.year, month=earliest_date.month, day=1)
latest_date = data.index.max()
_, last_date_of_month = calendar.monthrange(latest_date.year, latest_date.month)
last_month_end = pd.Timestamp(year=latest_date.year, month=latest_date.month, day=last_date_of_month)
if first_month_start not in data:
data[first_month_start] = None
if last_month_end not in data:
data[last_month_end] = None
data = data.groupby(data.index).agg(list) # Collate multiple values for a given date as a list
data = data.sort_index().asfreq('D') # Resample as daily data
data = data.explode() # Unpack the list over multiple rows
return data
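# Example (illustrative): a Series with entries on 2021-01-15 and 2021-02-03 comes back reindexed
# daily from 2021-01-01 through 2021-02-28, with None on the added dates, e.g.
# s = pd.Series({pd.Timestamp('2021-01-15'): 'a', pd.Timestamp('2021-02-03'): 'b'})
# extended = extend_data_range(s)
# extended.index.min(), extended.index.max()  # -> (Timestamp('2021-01-01'), Timestamp('2021-02-28'))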
def apply_colour_map(data, colour_map, date_colour, exclude_colour, min_date, max_date):
"""
Converts the events in the Series to a colour value using the colour map for mapped events, and the default date
and exclude colours for other dates.
:param data: The events Series
:param colour_map: A map from event to colour
:param date_colour: The default colour for a date square
:param exclude_colour: The colour for a date square which falls outside of the start/end date
:param min_date: Dates with null data before this date will use the exclude colour
:param max_date: Dates with null data after this date will use the exclude colour
:return: A Series with a colour value for every date
"""
data = data.map(colour_map)
data[ | pd.isna(data) | pandas.isna |
import json
import pandas as pd
from pathlib import Path
from tqdm import tqdm
def extract_columns(dataset):
resource = dataset['resource']
names = resource['columns_name']
field_name = resource['columns_field_name']
field_type = resource['columns_datatype']
descriptions = resource['columns_description']
return | pd.DataFrame({'name':names, 'field_type': field_type, 'field_name': field_name, 'description':descriptions, 'dataset-id' : resource['id'], 'dataset_link':dataset['permalink']}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script uses an LSTM network in keras to predict the tides (sea level) as a function of astronomical motions
It is based on an example given in
https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
For this script to work it is necessary to already have the time series of the relative position of the Moon and the Sun.
This time series can be obtained by running the accompanying script.
@author: <NAME> (<EMAIL>)
"""
#%%
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import Adam
import keras_tuner as kt
import pandas as pd
import numpy as np
import datetime
# %% convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, n_f=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in-1, 0, -1):
cols.append(df.loc[:,0:(n_f-1)].shift(i))
#
for i in range(n_in, 0, -1):
names += [('var%d(t-%d)' % (j+1, i-1)) for j in range(n_f)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % data.shape[1])]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_out)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
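# Editor's note (illustrative, not from the original author): this is a modified version of the
# usual shift-based framing -- the first n_f columns are appended at positive lags and the
# unshifted frame supplies the time-t values, so each surviving row pairs a short history of the
# predictor columns with the current observation.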
# %%
# %% Load data
# Load sea level data
p_name = "test_RES_0"
in_folder = ('../data/')
# Load predictors
predictors = {"files": ['uerra_10min_a.csv', 'uerra_10min_b.csv'],
"keys": ['wind_speed', 'cosine_wind_angle', 'sine_wind_angle', 'pressure'],
}
L = pd.read_csv('../data/DenHeld_HA.csv')
d_in = {}
T = pd.read_csv(in_folder+predictors["files"][0],usecols=['time'])
T['time']= | pd.to_datetime(T['time']) | pandas.to_datetime |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import plotly.graph_objs as go
from src.visualization.utils import get_palette
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
################################################################################################################
# LOAD AND PROCESS DATA
# Load
df = pd.read_csv('./data/processed/GBD_child_health_indicators.csv')
location_metadata = pd.read_csv('./data/metadata/gbd_location_metadata.csv')
all_indicators = list(df.indicator.unique())
# There are some wonky indicators to not include in default list
my_inds = [x for x in all_indicators if x not in ['Low birth weight for gestation', 'Short gestation for birth weight']]
n_neighbors = 4
year_ids = list(df.year_id.unique())
# Indicator Value by country in wide format
index_vars = ['location_name', 'year_id']
df_wide = df.pivot_table(index=index_vars, columns='indicator', values='val').reset_index()
df_wide = pd.merge(location_metadata, df_wide)
################################################################################################################
top_markdown_text = '''
### Global Burden of Disease - Child Health Indicators
'''
overview_markdown_text = '''
The Global Burden of Disease estimates many child health indicators that inform efforts to achieve Sustainable Development Goal 3.2:
> By 2030, end preventable deaths of newborns and children under 5 years of age, with all countries aiming to reduce [...] under-5 mortality to at least as low as 25 per 1000 live births.
This clustering analysis examines how epidemiologic patterns can both follow and defy traditional geographic categories.
Clusters are assigned by a k-means clustering algorithm using selected indicators and number of clusters.
**Indicator values are scaled 0-100 with 100 representing highest burden**
*To learn more, here is the corresponding blog post on [Data, Science, and Sustainable Development](https://medium.com/@zwrankin/data-science-and-sustainable-development-challenging-historical-paradigms-with-k-means-b1b39305e3e7)*
'''
# 0 represents the 2.5th percentile of globally observed values and 100 the 97.5th percentile.
# Available indicators include the top global risks and causes from 2017.
bottom_markdown_text = '''
Estimates by the [Institute for Health Metrics and Evaluation](http://www.healthdata.org/) and available
[here](http://ghdx.healthdata.org/gbd-2017)
Visualization by [<NAME>](https://github.com/zwrankin/health_indicators)
'''
# Visit [GBD Compare](https://vizhub.healthdata.org/gbd-compare/#) for complete GBD results visualization.
def make_colorscale(n):
"""Maps [0,n] palette to [0,1] scale to fit Plotly colorscale"""
return [[k / (n - 1), get_palette(n)[k]] for k in range(0, n)]
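# Example (illustrative): make_colorscale(3) -> [[0.0, c0], [0.5, c1], [1.0, c2]], i.e. the three
# palette colours anchored at evenly spaced points of Plotly's [0, 1] colorscale.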
# Style settings for Plotly graphs (Plotly does not provide CSS interface - https://community.plot.ly/t/set-plot-colours-with-external-css/6319)
plotly_font_size = 16
app.layout = html.Div([
# LEFT - Global options and map
html.Div([
dcc.Markdown(children=top_markdown_text),
html.P('Number of clusters'),
dcc.Slider(
id='n-clusters',
min=2,
max=7,
step=1,
marks={i: str(i) for i in range(2, 7 + 1)},
value=6,
),
html.P('.'),
dcc.RadioItems(
id='year',
options=[{'label': i, 'value': i} for i in year_ids],
value=2017,
labelStyle={'display': 'inline-block'},
),
html.P('Indicators to include in clustering algorithm'),
dcc.Dropdown(
id='indicators',
options=[{'label': i, 'value': i} for i in all_indicators],
multi=True,
value=[i for i in my_inds]
),
html.P('.'),
dcc.Markdown('*Hover over map to select country for plots*'),
dcc.Graph(id='county-choropleth'),
dcc.Markdown(children=bottom_markdown_text)
], style={'float': 'left', 'width': '39%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([
dcc.Markdown(children=overview_markdown_text),
], style={'float': 'right', 'width': '59%', 'display': 'inline-block', 'padding': '0 20'}),
# RIGHT - Tabs
html.Div([
dcc.Tabs(id="tabs", style={
'textAlign': 'left', 'margin': '48px 0', 'fontSize': 18, 'color': 'blue'}, children=[
dcc.Tab(label='Clustering', children=[
# Hidden div stores the clustering model results to share between callbacks
html.Div(id='clustered-data', style={'display': 'none'}),
html.Div([
# Dropdown options are set dynamically through callback
dcc.Dropdown(
id='xaxis-column',
value='Low GDP per capita'
),
dcc.Dropdown(
id='yaxis-column',
value='Under-5 Mortality Rate'
),
]),
html.Div([
dcc.Graph(id='scatterplot'),
], style={'fontSize': plotly_font_size}),
]),
dcc.Tab(label='Comparisons', children=[
dcc.Markdown('*Locations to compare*'),
dcc.RadioItems(
id='entity-type',
options=[{'label': i, 'value': i} for i in ['Countries', 'Clusters']],
value='Countries',
labelStyle={'display': 'inline-block'},
),
dcc.Markdown('*Whether to plot value or comparison to selected location*'),
dcc.RadioItems(
id='comparison-type',
options=[{'label': i, 'value': i} for i in ['Value', 'Comparison']],
value='Value',
labelStyle={'display': 'inline-block'},
),
dcc.Markdown('*Connect dots*'),
dcc.RadioItems(
id='connect-dots',
options=[{'label': 'No', 'value': False}, {'label': 'Yes', 'value': True}],
value=True,
labelStyle={'display': 'inline-block'},
),
# daq seems to have Heroku compatibility issues
# daq.BooleanSwitch(
# id='connect-dots',
# label="Connect Dots",
# on=True,
# ),
dcc.Dropdown(
id='countries',
options=[{'label': i, 'value': i} for i in df_wide.location_name.unique()],
placeholder="Select additional countries to plot",
multi=True,
),
dcc.Graph(id='similarity_scatter'),
]),
dcc.Tab(label='Time Trends', children=[
dcc.Markdown(
'*For Under-5 Mortality forecasted until 2030, see [SDG Visualization](http://ihmeuw.org/4prj)*'),
dcc.Graph(id='time-series'),
]),
# dcc.Tab(label='Parallel Coordinates', children=[
# dcc.Graph(id='parallel-coords'),
# dcc.Markdown('*Tips: drag along y axis to subset lines, and drag indicator names to reorder columns*')
# ]),
]),
], style={'float': 'right', 'width': '59%', 'display': 'inline-block', 'padding': '0 20'}),
])
@app.callback(Output('xaxis-column', 'options'),
[Input('indicators', 'value')])
def set_xaxis_options(indicators):
return [{'label': i, 'value': i} for i in indicators]
@app.callback(Output('yaxis-column', 'options'),
[Input('indicators', 'value')])
def set_yaxis_options(indicators):
return [{'label': i, 'value': i} for i in indicators]
@app.callback(Output('clustered-data', 'children'),
[Input('n-clusters', 'value'),
Input('indicators', 'value'),
Input('year', 'value')])
def cluster_kmeans(n_clusters, indicators, year):
df_c = df_wide.query(f'year_id == {year}')[['location_name'] + indicators].set_index('location_name')
kmean = KMeans(n_clusters=n_clusters, random_state=0)
kmean.fit(df_c)
# Rank cluster ids by mean U5MR within cluster
df_ordered = df_wide.query(f'year_id == {year}')
df_ordered['cluster'] = kmean.labels_
df_ordered = df_ordered.groupby('cluster')['Under-5 Mortality Rate'].mean().reset_index()
df_ordered['U5MR_rank'] = df_ordered['Under-5 Mortality Rate'].rank().astype(
'int') - 1 # rank starts at 1, we want 0-indexed
cluster_map = df_ordered.set_index('cluster')['U5MR_rank'].to_dict()
# Set cluster equal to U5MR rank
df_c.reset_index(inplace=True)
df_c['cluster'] = pd.Series(kmean.labels_).map(cluster_map)
df_c = pd.merge(location_metadata, df_c)
df_c['color'] = df_c.cluster.map(get_palette(n_clusters))
return df_c.to_json()
@app.callback(
Output('county-choropleth', 'figure'),
[Input('clustered-data', 'children')])
def update_map(data_json):
df_c = pd.read_json(data_json)
n_clusters = len(df_c.cluster.unique())
colorscale = make_colorscale(n_clusters)
return dict(
data=[dict(
locations=df_c['ihme_loc_id'],
z=df_c['cluster'].astype('float'),
text=df_c['location_name'],
colorscale=colorscale,
autocolorscale=False,
type='choropleth',
showscale=False, # Color key unnecessary since clusters are arbitrary and have key in scatterplot
)],
layout=dict(
# title='Hover over map to select country to plot',
height=400,
font=dict(size=plotly_font_size),
margin={'l': 0, 'b': 0, 't': 0, 'r': 0},
geo=dict(showframe=False,
projection={'type': 'Mercator'}))
)
@app.callback(
Output('scatterplot', 'figure'),
[Input('xaxis-column', 'value'),
Input('yaxis-column', 'value'),
Input('county-choropleth', 'hoverData'),
Input('clustered-data', 'children')])
def update_graph(xaxis_column_name, yaxis_column_name, hoverData, data_json):
if hoverData is None: # Initialize before any hovering
location_name = 'Nigeria'
else:
location_name = hoverData['points'][0]['text']
df_c = pd.read_json(data_json).sort_values('cluster')
# Make size of marker respond to map hover
df_c['size'] = 12
df_c.loc[df_c.location_name == location_name, 'size'] = 30
# Make selected country last (so it plots on top)
df_c = pd.concat([df_c[df_c.location_name != location_name], df_c[df_c.location_name == location_name]])
return {
'data': [
go.Scatter(
x=df_c[df_c['cluster'] == i][xaxis_column_name],
y=df_c[df_c['cluster'] == i][yaxis_column_name],
text=df_c[df_c['cluster'] == i]['location_name'],
mode='markers',
opacity=0.7,
marker={
'size': df_c[df_c['cluster'] == i]['size'], # 12,
'color': df_c[df_c['cluster'] == i]['color'], # palette[i], #
'line': {'width': 0.5, 'color': 'white'}
},
name=f'Cluster {i}'
) for i in df_c.cluster.unique()
],
'layout': go.Layout(
height=500,
xaxis={'title': xaxis_column_name},
yaxis={'title': yaxis_column_name},
margin={'l': 80, 'b': 60, 't': 10, 'r': 10},
legend={'x': 0, 'y': 1},
hovermode='closest',
font=dict(size=plotly_font_size),
)
}
@app.callback(
Output('similarity_scatter', 'figure'),
[Input('county-choropleth', 'hoverData'),
Input('entity-type', 'value'),
Input('comparison-type', 'value'),
Input('indicators', 'value'),
Input('year', 'value'),
Input('countries', 'value'),
Input('connect-dots', 'value'),
Input('clustered-data', 'children')])
def update_scatterplot(hoverData, entity_type, comparison_type, indicators, year, countries, connect_dots, data_json):
if hoverData is None: # Initialize before any hovering
location_name = 'Nigeria'
cluster = 0
else:
location_name = hoverData['points'][0]['text']
cluster = hoverData['points'][0]['z']
if connect_dots:
mode = 'lines+markers'
else:
mode = 'markers'
if entity_type == 'Countries':
data = df_wide.query(f'year_id == {year}')[['location_name'] + indicators].set_index('location_name')
l_data = data.loc[location_name]
similarity = np.abs(data ** 2 - l_data ** 2).sum(axis=1).sort_values()
locs = similarity[:n_neighbors + 1].index.tolist()
if countries is not None:
locs += countries
df_similar = data.loc[locs]
if comparison_type == 'Value':
title = f'Indicators of {location_name} and similar countries'
elif comparison_type == 'Comparison':
df_similar = (df_similar - l_data)
title = f'Indicators of countries relative to {location_name}'
df_similar = df_similar.reset_index().melt(id_vars='location_name', var_name='indicator')
# Sort by similarity
df_similar = pd.merge(df_similar, pd.Series(similarity, name='similarity').reset_index())
df_similar.sort_values(['similarity', 'indicator'], ascending=[True, False], inplace=True)
df_similar['size'] = 10
df_similar.loc[df_similar.location_name == location_name, 'size'] = 14
plot = [go.Scatter(
x=df_similar[df_similar['location_name'] == i]['value'],
y=df_similar[df_similar['location_name'] == i]['indicator'],
text=str(i),
mode=mode,
opacity=1 if i == location_name else .75,
marker={
'size': df_similar[df_similar['location_name'] == i]['size'],
'line': {'width': 0.5, 'color': 'white'}
},
name=str(i)
) for i in df_similar.location_name.unique()]
elif entity_type == 'Clusters':
df_c = | pd.read_json(data_json) | pandas.read_json |
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import pymortar
import datetime
import pandas as pd
import argparse
def get_all_points(client, site=None):
query = """SELECT ?point ?point_type WHERE { ?point rdf:type/rdfs:subClassOf* brick:Point . ?point rdf:type ?point_type . };"""
if site == None:
resp = client.qualify([query])
if resp.error != "" :
print("ERROR: ", resp.error)
return | pd.DataFrame() | pandas.DataFrame |
import os
import unittest
import random
import sys
import site # so that ai4water directory is in path
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler
from ai4water.preprocessing.datahandler import MultiLocDataHandler
from ai4water.datasets import load_u1, arg_beach
os.environ['PYTHONHASHSEED'] = '313'
random.seed(313)
np.random.seed(313)
# todo, check last dimension of x,y
# todo test with 3d y
def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'):
feat_dim = 1
if lookback > 1:
assert x.shape[1] == lookback
feat_dim = 2
assert x.shape[
feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}"
if y is not None:
assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}"
else:
assert num_outs == 0
y = x # just for next statement to run
if prev_y is None:
prev_y = x # just for next statement to run
assert x.shape[0] == y.shape[0] == prev_y.shape[
0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}"
if num_examples:
assert x.shape[
0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}'
return
def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'):
if isinstance(x, np.ndarray):
_check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples,
data_type=data_type)
elif isinstance(x, list):
while len(y)<len(x):
y.append(None)
for idx, i in enumerate(x):
_check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx],
data_loader.num_outs[idx], num_examples, data_type=data_type
)
elif isinstance(x, dict):
for key, i in x.items():
_check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key],
data_loader.num_outs[key], num_examples, data_type=data_type
)
elif x is None: # all should be None
assert all(v is None for v in [x, prev_y, y])
else:
raise ValueError
def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs):
val_examples = 0
if val_ex:
val_examples = val_x.shape[0]
test_examples = 0
if test_ex:
test_examples = test_x.shape[0]
xyz_samples = train_x.shape[0] + val_examples + test_examples
# todo, whould be equal
assert xyz_samples == tot_obs, f"""
data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}."""
def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader):
if isinstance(train_x, np.ndarray):
_check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df())
elif isinstance(train_x, list):
for idx in range(len(train_x)):
_check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex,
data_loader.tot_obs_for_one_df()[idx])
return
def check_inverse_transformation(data, data_loader, y, cols, key):
if cols is None:
# not output columns, so not checking
return
# check that after inverse transformation, we get correct y.
if data_loader.source_is_df:
train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key)
train_y_, index = data_loader.deindexify(train_y_, key=key)
compare_individual_item(data, key, cols, train_y_, data_loader)
elif data_loader.source_is_list:
#for idx in range(data_loader.num_sources):
# y_ = y[idx].reshape(-1, len(cols[idx]))
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for idx, y in enumerate(train_y_):
compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader)
elif data_loader.source_is_dict:
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for src_name, val in train_y_.items():
compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader)
def compare_individual_item(data, key, cols, y, data_loader):
if y is None:
return
train_index = data_loader.indexes[key]
if y.__class__.__name__ in ['DataFrame']:
y = y.values
for i, v in zip(train_index, y):
if len(cols) == 1:
if isinstance(train_index, pd.DatetimeIndex):
# if true value in data is None, y's value should also be None
if np.isnan(data[cols].loc[i]).item():
assert np.isnan(v).item()
else:
_t = round(data[cols].loc[i].item(), 0)
_p = round(v.item(), 0)
if not np.allclose(data[cols].loc[i].item(), v.item()):
print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(v, np.ndarray):
v = round(v.item(), 3)
_true = round(data[cols].loc[i], 3).item()
_p = round(v, 3)
if _true != _p:
print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(train_index, pd.DatetimeIndex):
assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}'
else:
assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001
def check_kfold_splits(data_handler):
if data_handler.source_is_df:
splits = data_handler.KFold_splits()
for (train_x, train_y), (test_x, test_y) in splits:
... # print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return
def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader):
if isinstance(train_y, list):
assert isinstance(val_y, list)
assert isinstance(test_y, list)
train_y = train_y[0]
val_y = val_y[0]
test_y = test_y[0]
if isinstance(train_y, dict):
train_y = list(train_y.values())[0]
assert isinstance(val_y, dict)
        assert isinstance(test_y, dict)
val_y = list(val_y.values())[0]
test_y = list(test_y.values())[0]
if out_cols is not None:
b = train_y.reshape(-1, )
if val_y is None:
a = test_y.reshape(-1, )
else:
a = val_y.reshape(-1, )
if not len(np.intersect1d(a, b)) == 0:
raise ValueError(f'train and val have overlapping values')
if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None:
a = test_y.reshape(-1,)
b = val_y.reshape(-1,)
assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values'
return
def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True,
assert_uniqueness=True, check_examples=True,
true_train_y=None, true_val_y=None, true_test_y=None):
config['teacher_forcing'] = True # todo
if 'val_fraction' not in config:
config['val_fraction'] = 0.3
if 'test_fraction' not in config:
config['test_fraction'] = 0.3
data_loader = DataHandler(data=data, save=save, verbosity=0, **config)
#dl = DataLoader.from_h5('data.h5')
train_x, prev_y, train_y = data_loader.training_data(key='train')
assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex)
val_x, prev_y, val_y = data_loader.validation_data(key='val')
assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation')
test_x, prev_y, test_y = data_loader.test_data(key='test')
assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test')
if check_examples:
check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader)
if isinstance(data, str):
data = data_loader.data
check_inverse_transformation(data, data_loader, train_y, out_cols, 'train')
if val_ex:
check_inverse_transformation(data, data_loader, val_y, out_cols, 'val')
if test_ex:
check_inverse_transformation(data, data_loader, test_y, out_cols, 'test')
check_kfold_splits(data_loader)
if assert_uniqueness:
assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader)
if true_train_y is not None:
assert np.allclose(train_y, true_train_y)
if true_val_y is not None:
assert np.allclose(val_y, true_val_y)
if true_test_y is not None:
assert np.allclose(test_y, true_test_y)
return data_loader
class TestAllCases(object):
def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True):
self.input_features = input_features
self.output_features = output_features
self.lookback = lookback
self.allow_nan_labels=allow_nan_labels
self.save=save
self.run_all()
def run_all(self):
all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']]
for m in all_methods:
getattr(self, m)()
return
def test_basic(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22
test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30
if self.output_features == ['c']:
tty = np.arange(202, 250).reshape(-1, 1, 1)
tvy = np.arange(250, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
check_examples=True,
)
assert loader.source_is_df
return
def test_with_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random'}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 20, 30,
save=self.save,
)
assert loader.source_is_df
return
def test_drop_remainder(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'batch_size': 8,
'drop_remainder': True,
'train_data': 'random'}
loader = build_and_test_loader(data, config, self.output_features,
48, 16, 24,
check_examples=False,
save=self.save,
)
assert loader.source_is_df
return
def test_with_same_val_data(self):
        # val_data is "same" as test_data and train_data is made based upon fractions.
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same'}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 29, 29,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
def test_with_same_val_data_and_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same'}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 30, 30,
check_examples=False,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_val_data(self):
        # we don't want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 29,
true_train_y=tty,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_no_val_data_with_random(self):
        # we don't want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_fraction': 0.0}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 30,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data(self):
# we don't want any test_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'test_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 29, 0,
true_train_y=tty,
true_val_y=tvy,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data_with_random(self):
# we don't want any test_data
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
        data = pd.DataFrame(data, columns=['a', 'b', 'c'])
# The normal imports
import numpy as np
from numpy.random import randn
import pandas as pd
# Import the stats library from scipy
from scipy import stats
# These are the plotting modules and libraries we'll use:
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Again seaborn comes with a great dataset to play and learn with
flight_dframe = sns.load_dataset('flights')
# Preview
flight_dframe.head()
# Let's pivot this dataframe so it's easier to manage
flight_dframe = flight_dframe.pivot("month", "year", "passengers")
# Show
flight_dframe
# This dataset is now in a clear format to be displayed as a heatmap
sns.heatmap(flight_dframe)
# We also have the option to annotate each cell
sns.heatmap(flight_dframe, annot=True, fmt='d')
# seaborn will automatically try to pick the best color scheme for your
# dataset, whether it be a diverging or converging colormap
# We can choose our own 'center' for our colormap
sns.heatmap(flight_dframe, center=flight_dframe.loc['January', 1955])
# heatmap() can be used on an axes for a subplot to create more
# informative figures
f, (axis1, axis2) = plt.subplots(2, 1)
yearly_flights = flight_dframe.sum()
# Since yearly_flights is a weird format, we'll have to grab the values we
# want with a Series, then put them in a dframe
years = pd.Series(yearly_flights.index.values)
years = pd.DataFrame(years)
flights = pd.Series(yearly_flights.values)
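# A plausible continuation (sketch only, not from the original notebook): show the yearly
# totals as a bar chart on the first axis and the month-by-year heatmap on the second.
year_dframe = pd.concat((years, flights), axis=1)
year_dframe.columns = ['Year', 'Flights']
sns.barplot(x='Year', y='Flights', data=year_dframe, ax=axis1)
sns.heatmap(flight_dframe, cmap='Blues', ax=axis2, cbar_kws={'orientation': 'horizontal'})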
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
from sklearn.compose import ColumnTransformer
import sklearn.preprocessing as skp
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing
from shapash.utils.columntransformer_backend import get_feature_names, get_names, get_list_features_names
# TODO
# StandardScaler return object vs float vs int
# Target encoding return object vs float
class TestInverseTransformColumnsTransformer(unittest.TestCase):
def test_inv_transform_ct_1(self):
"""
test inv_transform_ct with multiple encoding and drop option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_2(self):
"""
test inv_transform_ct with multiple encoding and passthrough option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_3(self):
"""
        test inv_transform_ct with multiple encoding and dictionary
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['CH', 'CH', 'PR'],
'onehot_ce_state': ['US-FR', 'US-FR', 'US-FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A-B', 'A-B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
input_dict1 = dict()
input_dict1['col'] = 'onehot_ce_city'
input_dict1['mapping'] = pd.Series(data=['chicago', 'paris'], index=['CH', 'PR'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'other'
input_dict2['mapping'] = pd.Series(data=['A', 'B', 'C'], index=['A-B', 'A-B', 'C'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'onehot_ce_state'
input_dict3['mapping'] = pd.Series(data=['US', 'FR'], index=['US-FR', 'US-FR'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
original = inverse_transform(result, [enc,input_dict1,list_dict])
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_4(self):
"""
test inv_transform_ct with single target category encoders and passthrough option
"""
        y = pd.DataFrame(data=[0, 1, 1, 1], columns=['y'])
from sklearn import preprocessing, metrics
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, KFold, RepeatedKFold, GroupKFold, GridSearchCV, train_test_split, TimeSeriesSplit
from datetime import datetime
import copy
import os
import fire
import glob
import pdb
##### import all Feature engineering functions
from util_feat_m5 import *
def features_to_category(df, nan_cols, cat_cols):
nan_features = nan_cols
for feature in nan_features:
df[feature].fillna('unknown', inplace = True)
categorical_cols = cat_cols
for feature in categorical_cols:
encoder = preprocessing.LabelEncoder()
df[feature] = encoder.fit_transform(df[feature].astype(str))
return df
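# Hypothetical usage sketch (column names are placeholders, not necessarily this project's schema):
#   df = features_to_category(df, nan_cols=['event_name_1'], cat_cols=['item_id', 'store_id'])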
def update_meta_csv(featnames, filename, cat_cols):
meta_csv = pd.DataFrame(columns = ['featname', 'filename', 'feattype'])
if os.path.exists('meta_features.csv'):
meta_csv = pd.read_csv('meta_features.csv')
append_data_dict = {'featname' : [], 'filename' : [], 'feattype' : []}
for feat in featnames:
if feat not in meta_csv['featname'].unique():
append_data_dict['filename'].append(filename)
append_data_dict['featname'].append(feat)
feat_type = "numeric" if feat not in cat_cols else "categorical"
append_data_dict['feattype'].append(feat_type)
else:
meta_csv.loc[meta_csv['featname'] == feat, 'filename'] = filename
append_df = pd.DataFrame.from_dict(append_data_dict)
meta_csv = meta_csv.append(append_df)
meta_csv.to_csv('meta_features.csv', index = False)
def get_cat_num_features_from_meta_csv(id_cols, dep_col):
drop_cols = id_cols + [dep_col]
meta_csv = pd.read_csv('meta_features.csv')
num_feats = [ x for x in meta_csv[meta_csv["feattype"] == "numeric"]['featname'].tolist() if x not in drop_cols]
cat_feats = [ x for x in meta_csv[meta_csv["feattype"] == "categorical"]['featname'].tolist() if x not in drop_cols]
return cat_feats, num_feats
def get_file_feat_from_meta_csv(selected_cols, id_cols):
meta_csv = pd.read_csv('meta_features.csv')
file_feat_mapping = {k:id_cols for k in meta_csv['filename'].unique().tolist()}
for selected_col in selected_cols:
selected_col_meta_df = meta_csv[meta_csv["featname"] == selected_col]
file_feat_mapping[selected_col_meta_df['filename'].tolist()[0]].append(selected_col)
print(id_cols)
return {k:list(set(v)) for k,v in file_feat_mapping.items()}
def features_generate_file(dir_in, dir_out, my_fun_features, features_group_name, input_raw_path = None, auxiliary_csv_path = None, drop_cols = None, index_cols = None, merge_cols_mapping = None, cat_cols = None, id_cols=None, dep_col = None, max_rows = 5, step_wise_saving = False) :
# from util_feat_m5 import lag_featrues
# featurestore_generate_feature(dir_in, dir_out, lag_featrues)
merged_df = pd.read_parquet(dir_in + "/raw_merged.df.parquet")
dfnew, cat_cols= my_fun_features(merged_df, input_raw_path, dir_out, features_group_name, auxiliary_csv_path, drop_cols, index_cols, merge_cols_mapping, cat_cols, id_cols, dep_col, max_rows)
if not step_wise_saving:
dfnew.to_parquet(f'{dir_out}/{features_group_name}.parquet')
# num_cols = list(set(dfnew._get_numeric_data().columns))
update_meta_csv(dfnew.columns, f'{features_group_name}.parquet', cat_cols)
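# feature_merge_df below accumulates a single wide frame: each frame in df_list is left-joined
# onto the running result, using whichever columns of cols_join are present in that frame as keys.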
def feature_merge_df(df_list, cols_join):
print(cols_join)
dfall = None
for dfi in df_list :
print(dfi.columns)
cols_joini = [ t for t in cols_join if t in dfi.columns ]
dfall = dfall.join(dfi.set_index(cols_joini), on = cols_joini, how="left") if dfall is not None else dfi
return dfall
def raw_merged_df(input_path="data/", out_path='out/', index_cols = None, dep_col = None, raw_merge_cols = None, merge_cols_mapping = None, nan_cols = None, cat_cols = None, max_rows=10):
df_sales_train = pd.read_csv(input_path + "/sales_train_gen.csv")
df_calendar = pd.read_csv(input_path + "/calendar_gen.csv")
    df_sales_val = pd.read_csv(input_path + "/sales_train_gen.csv")
# Percentage of homeowners & renters devoting 30+% of household income to housing
# Source: Census (census.data.gov) advanced search (Topics: 'Housing' & 'Income and Poverty'; Geography: All US Counties; Years: ACS 5-Yr. Estimates)
import pandas as pd
import numpy as np
import os
county_area_df = pd.read_csv('counties_2010_pop_and_house_density.csv') # Extracting county area (to calculate pop & housing density)
county_vacancy_df = pd.read_csv('county_vacancy_raw_5yr_0816.csv') # Total units vacant
county_hh_df = pd.read_csv('irs_mig_91to2018.csv')
import pandas as pd
import pytest
from deep_sentinel import dataset
@pytest.mark.parametrize(
"given,expected", [
({
'a': [1, 2, 3],
}, {
'a': 2.0,
}),
({
'a': [1, 2, 3],
'b': [3, 2, 1],
'c': [1, 1, 1],
}, {
'a': 2.0,
'b': 2.0,
'c': 1.0
})
]
)
def test_get_mean(given, expected):
    expected = pd.Series(expected)
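    # The remainder of the test is not shown above; presumably (an assumption, the
    # deep_sentinel API is not verified here) it compares the computed mean against
    # the expected Series, e.g.:
    #   pd.testing.assert_series_equal(dataset.get_mean(pd.DataFrame(given)), expected)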
"""
Pull down fresh data from Google Sheets to CSV
"""
import pytz
import pickle
import string
import os.path
import pandas as pd
import geopandas as gpd
from datetime import datetime
from googleapiclient.discovery import build
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from scripts.common import (
list_commissioners
, assemble_divo
, build_district_comm_commelect
)
class RefreshData():
def __init__(self):
# If modifying these scopes, delete the file token.pickle.
self.scopes = ['https://www.googleapis.com/auth/spreadsheets']
# The ID and range of a sample spreadsheet.
self.spreadsheet_ids = {
'openanc_source': '1QGki43vKLKJyG65Rd3lSKJwO_B3yX96SCljzmd9YJhk'
, 'openanc_published': '1XoT92wFBKdmnUc6AXwABeWNjsjcwrGMPMKu1XsBOygU'
}
self.service = self.google_auth()
def google_auth(self):
"""
        Authenticate to the Google Sheets API
Source: https://developers.google.com/sheets/api/quickstart/python
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', self.scopes)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
return service
def test_google_connection(self):
"""
Write some data to the OpenANC Published sheet to confirm that Google connection works
"""
df = pd.DataFrame({'a': [1,2], 'b': [3,4]})
tz = pytz.timezone('America/New_York')
dc_now = datetime.now(tz)
dc_timestamp = dc_now.strftime('%Y-%m-%d %H:%M:%S') # Hour of day: %-I:%M %p
df['updated_at'] = dc_timestamp
self.upload_to_google_sheets(df, list(df.columns), 'openanc_published', 'ConnectionTest')
print('Successfully wrote data to Google Sheets.')
def upload_to_google_sheets(self, df, columns_to_publish, destination_spreadsheet, destination_sheet):
"""
Push values to a Google Sheet
Note that dates are not JSON serializable, so dates have to be converted to strings
"""
for c in columns_to_publish:
df[c] = df[c].fillna('')
values = [columns_to_publish]
values += df[columns_to_publish].to_numpy().tolist()
body = {'values': values}
# value_input_option = 'RAW'
value_input_option = 'USER_ENTERED'
spreadsheet_id = self.spreadsheet_ids[destination_spreadsheet]
destination_range = destination_sheet + '!A:' + string.ascii_uppercase[len(columns_to_publish) - 1]
result = self.service.spreadsheets().values().update(
spreadsheetId=spreadsheet_id, range=destination_range,
valueInputOption=value_input_option, body=body).execute()
cells_updated = result.get('updatedCells')
print(f'{cells_updated} cells updated in Google Sheet: {destination_spreadsheet}, sheet: {destination_sheet}')
def assemble_smd_info(self, duplicate_check=False, print_counts=False, publish_to_google_sheets=False):
"""
Return DataFrame, one row per district, with candidate names and counts
Destination is a Mapbox dataset
"""
districts = pd.read_csv('data/districts.csv')
candidates = pd.read_csv('data/candidates.csv')
commissioners = list_commissioners(status='current')
people = pd.read_csv('data/people.csv')
candidate_statuses = pd.read_csv('data/candidate_statuses.csv')
candidate_people = pd.merge(candidates, people, how='inner', on='person_id')
candidate_people.rename(columns={'full_name': 'full_name_candidate'}, inplace=True)
cps = pd.merge(candidate_people, candidate_statuses, how='inner', on='candidate_status')
commissioner_people = pd.merge(commissioners, people, how='inner', on='person_id')
commissioner_people.rename(columns={'full_name': 'full_name_commissioner'}, inplace=True)
# Only include active candidates
district_candidates = pd.merge(districts, cps[cps['count_as_candidate']].copy(), how='left', on='smd_id')
# todo: make this candidate order also randomized
district_info = district_candidates.groupby(['smd_id', 'map_color_id']).agg({
'full_name_candidate': list
, 'candidate_id': 'count'
}).reset_index()
district_info_comm = pd.merge(district_info, commissioner_people[['smd_id', 'full_name_commissioner']], how='left', on='smd_id')
district_info_comm.rename(columns={
'full_name_commissioner': 'current_commissioner'
, 'full_name_candidate': 'list_of_candidates'
, 'candidate_id': 'number_of_candidates'
}, inplace=True)
district_info_comm['current_commissioner'] = district_info_comm['current_commissioner'].fillna('(vacant)')
district_info_comm.loc[district_info_comm['number_of_candidates'] == 0, 'list_of_candidates'] = (
district_info_comm.loc[district_info_comm['number_of_candidates'] == 0, 'list_of_candidates'].apply(
lambda x: ['(no known candidates)'])
)
district_info_comm['list_of_candidates'] = district_info_comm['list_of_candidates'].apply(', '.join)
# Maybe add Last Updated to this?
if duplicate_check:
district_info_comm[district_info_comm['number_of_candidates'] > 1][['smd_id', 'current_commissioner', 'list_of_candidates']].to_csv('data/check_for_duplicates.csv', index=False)
if print_counts:
print('Candidate Count: {}'.format( cps['count_as_candidate'].sum()))
print('\nDistricts by number of candidates: ')
print(district_info_comm.groupby('number_of_candidates').size())
print()
if publish_to_google_sheets:
if len(district_info_comm) != 296:
raise ValueError('The number of districts to publish to Google Sheets is not correct.')
district_info_comm['openanc_link'] = 'https://openanc.org/ancs/districts/' + district_info_comm['smd_id'].str.replace('smd_', '').str.lower() + '.html'
columns_to_publish = ['smd_id', 'current_commissioner', 'number_of_candidates', 'list_of_candidates', 'openanc_link']
self.upload_to_google_sheets(district_info_comm, columns_to_publish, 'openanc_published', 'SMD Candidates 2020')
return district_info_comm
def build_map_display_box(self, cp):
"""
Build a string containing names of the commissioner and commissioner-elect.
This entire string will be displayed in the map display box on the lower right of all maps
"""
for idx, row in cp.iterrows():
smd_id = row['smd_id']
smd_display = smd_id.replace('smd_','')
smd_display_lower = smd_display.lower()
map_display_box = (
f'<b>District {smd_display}</b>'
+ f'<br/><a href="ancs/districts/{smd_display_lower}.html">District Page</a>'
+ f'<br/>Commissioner: {row.current_commissioner}'
)
# If a commissioner with a future start_date exists for the SMD, append the Commissioner-Elect string
if pd.notnull(row.commissioner_elect):
map_display_box += f'<br/>Commissioner-Elect: {row.commissioner_elect}'
cp.loc[idx, 'map_display_box'] = map_display_box
return cp
def add_data_to_geojson(self):
"""
Save new GeoJSON files with updated data fields based off of the results of the election
# todo: push these tilesets to Mapbox via API
"""
district_comm_commelect = build_district_comm_commelect()
cp_current_future = self.build_map_display_box(district_comm_commelect)
divo = assemble_divo()
cp_divo = pd.merge(cp_current_future, divo[['smd_id', 'votes']], how='inner', on='smd_id')
cp_divo = cp_divo.rename(columns={'votes': 'votes_2020'})
# Add data to GeoJSON file with SMD shapes
smd = gpd.read_file('maps/smd.geojson')
# Use the map_color_id field from the Google Sheets over what is stored in the GeoJSON
smd.drop(columns=['map_color_id'], inplace=True)
smd_df = smd.merge(cp_divo, on='smd_id')
# add ward to the SMD dataframe
smd_df.to_file('uploads/to-mapbox-smd-data.geojson', driver='GeoJSON')
# Add data to CSV with lat/long of SMD label points
lp = pd.read_csv('maps/label-points.csv')
lp_df = pd.merge(lp, cp_divo[['smd_id', 'current_commissioner', 'commissioner_elect', 'votes_2020']], how='inner', on='smd_id')
lp_df_cp = pd.merge(lp_df, cp_current_future[['smd_id', 'map_display_box']], how='inner', on='smd_id')
lp_df_cp.to_csv('uploads/to-mapbox-label-points-data.csv', index=False)
def add_data_to_geojson_candidates(self):
"""
Save new GeoJSON files with updated data fields
# todo: push these tilesets to Mapbox via API
"""
df = self.assemble_smd_info(
duplicate_check=False
, print_counts=False
, publish_to_google_sheets=False
)
# Add data to GeoJSON file with SMD shapes
smd = gpd.read_file('maps/smd.geojson')
# Use the map_color_id field from the Google Sheets over what is stored in the GeoJSON
smd.drop(columns=['map_color_id'], inplace=True)
smd_df = smd.merge(df, on='smd_id')
# add ward to the SMD dataframe
districts = pd.read_csv('data/districts.csv')
smd_df_ward = pd.merge(smd_df, districts[['smd_id', 'ward']], how='inner', on='smd_id')
smd_df_ward.to_file('uploads/to-mapbox-smd-data.geojson', driver='GeoJSON')
# Add data to CSV with lat/long of SMD label points
lp = pd.read_csv('maps/label-points.csv')
lp_df = pd.merge(lp, df[['smd_id', 'current_commissioner', 'number_of_candidates', 'list_of_candidates']], how='inner', on='smd_id')
lp_df.to_csv('uploads/to-mapbox-label-points-data.csv', index=False)
def publish_commissioner_list(self):
"""
Publish list of commissioners to OpenANC Published
Based off of the notebook, Twitter_Accounts_of_Commissioners.ipynb
"""
# Commissioners currently active
commissioners = list_commissioners(status='current')
people = pd.read_csv('data/people.csv')
districts = pd.read_csv('data/districts.csv')
dc = pd.merge(districts, commissioners, how='left', on='smd_id')
dcp = pd.merge(dc, people, how='left', on='person_id')
dcp['start'] = dcp['start_date'].dt.strftime('%Y-%m-%d')
dcp['end'] = dcp['end_date'].dt.strftime('%Y-%m-%d')
twttr = dcp.sort_values(by='smd_id')
if len(twttr) != 296:
raise ValueError('The number of districts to publish to Google Sheets is not correct.')
twttr['openanc_link'] = 'https://openanc.org/ancs/districts/' + twttr['smd_id'].str.replace('smd_', '').str.lower() + '.html'
columns_to_publish = ['smd_id', 'person_id', 'full_name', 'start', 'end', 'twitter_link', 'facebook_link', 'website_link', 'openanc_link']
self.upload_to_google_sheets(twttr, columns_to_publish, 'openanc_published', 'Commissioners')
def publish_results(self):
"""
Publish results from 2020 elections to OpenANC Published
"""
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
        results = pd.read_csv('data/results.csv')
#!/usr/bin/env python
# coding: utf-8
# In[60]:
import numpy as np
import pandas as pd
# pandas library is attached as a zip file in the submission.
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import KFold, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from string import punctuation
import re
stop = stopwords.words('english')
# # Basic cleaning
# remove punctuations and numbers
# In[61]:
df_positive = pd.read_table("rt-polarity.pos",sep='\n',header = None,dtype=str,encoding='latin-1')
df_negative = pd.read_table("rt-polarity.neg",sep='\n',header = None,dtype=str,encoding='latin-1')
df_positive[1]='1'
df_negative[1]='0'
def remove_punctuations(sentence):
for p in punctuation:
sentence = sentence.replace(p,'')
return sentence
df_positive[0]=df_positive[0].apply(remove_punctuations)
df_negative[0]=df_negative[0].apply(remove_punctuations)
def remove_digits(sentence):
sentence = ''.join([i for i in sentence if not i.isdigit()])
return sentence
df_positive[0]=df_positive[0].apply(remove_digits)
df_negative[0]=df_negative[0].apply(remove_digits)
df_positive[0].dropna(inplace = True)
df_positive[1].dropna(inplace = True)
df_negative[0].dropna(inplace = True)
df_negative[1].dropna(inplace = True)
df_train_pos = df_positive[:4264]
df_test_pos = df_positive[4264:]
df_train_neg = df_negative[:4265]
df_test_neg = df_negative[4265:]
df_train = pd.concat([df_train_pos,df_train_neg])
# till 8529
df_test = pd.concat([df_test_pos,df_test_neg])
# 8529 till end
df_mixed = pd.concat([df_train,df_test])
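# The classification step is not shown above; the following is a minimal sketch of what the
# imports suggest (an assumption, not the original submission): vectorize the reviews and fit
# a logistic-regression classifier on the train split, then score the held-out split.
vectorizer = CountVectorizer(stop_words=stop)
X_train = vectorizer.fit_transform(df_train[0])
X_test = vectorizer.transform(df_test[0])
clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, df_train[1])
predictions = clf.predict(X_test)
print(accuracy_score(df_test[1], predictions))
print(classification_report(df_test[1], predictions))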
import requests
import json
import time
import pandas as pd
import sqlalchemy
#TODO: Update token_file path (should be in JSON format)
token_file = r'C:\Users\vamsh\Desktop\temp\tokens.txt'
tokens = {}
with open(token_file) as f:
tokens = json.loads(f.read())
BEARER_TOKEN = tokens['Bearer Token']
TARGET_TABLE = 'tweets'
TARGET_SCHEMA = 'dbo' # Using default dbo for simplicity.
def bearer_oauth(r):
"""
Method required by bearer token authentication.
"""
r.headers["Authorization"] = f"Bearer {BEARER_TOKEN}"
r.headers["User-Agent"] = "v2FilteredStreamPython"
return r
def define_sql_engine(sql_server_name, target_db_name):
# Setup the connection Engine
engine = sqlalchemy.create_engine(f'mssql+pyodbc://{sql_server_name}/{target_db_name}?driver=SQL+Server')
return engine
def get_rules():
response = requests.get(
"https://api.twitter.com/2/tweets/search/stream/rules", auth=bearer_oauth
)
print(json.dumps(response.json()))
return response.json()
def delete_all_rules(rules):
if rules is None or "data" not in rules:
return None
ids = list(map(lambda rule: rule["id"], rules["data"]))
payload = {"delete": {"ids": ids}}
response = requests.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload
)
print(json.dumps(response.json()))
def set_rules():
'''
Sets the Justin Bieber rule and returns True if the rule was applied properly, and False if not.
    Filtering out music-related tweets using Twitter's Context Annotations -
54: Musician, 84: Book Music Genre, 89: Music Album
Only excluding 89.* because volume of tweets is very limited when Musician, and/or Book Music Genre
are also excluded.
'''
rules = [
{"value": "-context:89.* -is:retweet (\"<NAME>\") lang:en"}
]
payload = {"add": rules}
response = requests.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload,
)
resp_json = response.json()
# The response should have a 'data' key
if 'data' in resp_json:
print(json.dumps(resp_json))
return True
return False
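# Typical call order for this module (sketch only, the original driver code is not shown above):
#   engine = define_sql_engine('<server>', '<database>')
#   delete_all_rules(get_rules()); set_rules(); get_stream(engine)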
def get_stream(sql_engine):
tweet_fields = 'id,text,author_id,conversation_id,created_at'
response = requests.get(
f"https://api.twitter.com/2/tweets/search/stream?tweet.fields={tweet_fields}"
, auth=bearer_oauth, stream=True,
)
print(response.status_code)
for response_line in response.iter_lines():
if response_line:
json_response = json.loads(response_line)
print(json.dumps(json_response, indent=4, sort_keys=True))
load_tweet(json_response, sql_engine)
def load_tweet(tweet_json, sql_engine):
tweet_dict = tweet_json['data']
data ={
'author_id' : [tweet_dict['author_id']],
'conversation_id' : [tweet_dict['conversation_id']],
'id' : [tweet_dict['id']],
'text' : [tweet_dict['text']],
'created_at' : [tweet_dict['created_at']]
}
    df = pd.DataFrame(data)
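    # The rest of this function is not shown here; a plausible continuation (an assumption,
    # not the original code) would simply append the single-row frame to the target table:
    df.to_sql(TARGET_TABLE, con=sql_engine, schema=TARGET_SCHEMA, if_exists='append', index=False)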
# -*- coding: utf-8 -*-
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui,QtWidgets
from pyqtgraph.dockarea import *
from time import perf_counter
from pyqtgraph.graphicsItems.GradientEditorItem import Gradients
import sys
import numpy as np
import os
from functools import partial
import time
from openpyxl import load_workbook
import pandas as pd
import time
pg.mkQApp()
## Define main window class from template
path = os.path.dirname(os.path.abspath(__file__))
uiFile = os.path.join(path, 'GUIDE.ui')
WindowTemplate, TemplateBaseClass = pg.Qt.loadUiType(uiFile)
### BEGIN Modele class ###
class Modele():
def __init__(self):
# Allow importing any file provided as argument in the form: python3 GUIDE.py -f model_input
if len(sys.argv) > 1:
import importlib
option_name = sys.argv[1]
assert option_name == '-f', f"option '{option_name}' not understood. Known option is only '-f' with filename as value"
assert len(sys.argv)>=3, "provide a filename to load parameters from"
lib_path = sys.argv[2]
if lib_path.endswith('.py'): lib_path = lib_path.rstrip('.py')
input_file = importlib.import_module(lib_path.replace('/','.'))
else:
import model_input as input_file
# Loading plots configuration (used in MainWindow class)
docks = input_file.load_docks()
setattr(self,'docks',docks)
# Loading window parameters
window_params = input_file.window_params
if 'streaming' not in window_params.keys(): self.streaming = True
for window_param in window_params.keys():
setattr(self,window_param,window_params[window_param])
# Tracking time
self.nstep = 0
self.time_stamp = np.zeros(self.array_size).astype(np.float64)
# Loading parameters
params = input_file.load_params()
setattr(self,'params',params)
for param in self.params.keys():
if isinstance(self.params[param]['step'],int): typ = int
else: typ = np.float64
self.params[param]['value'] = self.params[param]['init_cond'] * np.ones(self.array_size).astype(typ)
# Set default plot for params to False if none provided
for param in self.params.keys():
if 'plot' not in self.params[param].keys():
self.params[param]['plot'] = False
# Loading variables
variables = input_file.load_variables()
setattr(self,'variables',variables)
# Loading observables
observables = input_file.load_observables()
setattr(self,'observables',observables)
# List as defined in the input file (as observables are added to variables dict)
list_variables = list(self.variables.keys())
list_observables = list(self.observables.keys())
# Concatenate the variables and observables dict (if 'invert_var_obs' then invert order observables and variables are displayed)
if not 'invert_order_obs_var' in window_params.keys(): self.variables = dict(self.variables, **self.observables)
else:
if window_params['invert_order_obs_var']:
self.variables = dict(self.observables, **self.variables)
else:
self.variables = dict(self.variables, **self.observables)
# Build main dict of variables
for variable in self.variables.keys():
self.variables[variable]['value'] = self.variables[variable]['init_cond'] * np.ones(self.array_size).astype(self.variables[variable]['type'])
if variable in list_variables:
self.variables[variable]['observable'] = False
elif variable in list_observables:
self.variables[variable]['observable'] = True
# Assert no params, variables and observables are called the same
assert len(set(list_variables)&set(list_observables))==0 and len(set(list_variables)&set(list(self.params.keys())))==0 and len(set(list(self.params.keys()))&set(list_observables))==0, f"Repeated name for variables, observables and/or parameters"
# Set default plot for variables to True if none provided
for variable in self.variables.keys():
if 'plot' not in self.variables[variable].keys():
self.variables[variable]['plot'] = True
# Loading equations into keyword 'equation' in variables dict
# 'diff_eq_' and 'eq_' are default patterns for variables and observables respectively
pattern_variables = 'diff_eq_'
for key in [attr for attr in input_file.__dict__.keys() if attr.startswith(pattern_variables)]:
variable = key.split(pattern_variables)[-1]
if variable not in list_variables:
print(f"Warning: Equation for Variable {variable} not used or not understood")
continue
if 'equation' in self.variables[variable].keys(): continue
self.variables[variable]['equation'] = input_file.__dict__[key]
pattern_observables = 'eq_'
for key in [attr for attr in input_file.__dict__.keys() if attr.startswith(pattern_observables)]:
variable = key.split(pattern_observables)[-1]
if variable not in list_observables:
print(f"Warning: Equation for Observable {variable} not used or not understood")
continue
if 'equation' in self.variables[variable].keys(): continue
self.variables[variable]['equation'] = input_file.__dict__[key]
# Create dict of the usable kernels
self.kernels = {}
pattern_kernels = 'kernel_'
for key in [attr for attr in self.__dir__() if attr.startswith(pattern_kernels)]:
kernel = key.split(pattern_kernels)[-1]
self.kernels[kernel] = {}
self.kernels[kernel]['value'] = getattr(self,key)
for key in [attr for attr in input_file.__dict__.keys() if attr.startswith(pattern_kernels)]:
kernel = key.split(pattern_kernels)[-1]
self.kernels[kernel] = {}
self.kernels[kernel]['value'] = input_file.__dict__[key]
# Load additional keyboard keys if any provided
self.user_defined_keyPressEvent = input_file.keyboard_keys()
if self.user_defined_keyPressEvent is None: self.user_defined_keyPressEvent = {} # if None provided
system_reserved_keys = [" ", "<KEY>"]
for user_defined_key in self.user_defined_keyPressEvent.keys():
assert user_defined_key not in system_reserved_keys, f"User defined key '{user_defined_key}' in system reserved ones {system_reserved_keys}"
########################### BEGIN Assertions input file ###########################
# 'dock' (variables): Not providing dock_name that doesn't exist
for variable in self.variables.keys():
if 'dock' in self.variables[variable]:
for dock_name in self.variables[variable]['dock']:
if not isinstance(dock_name,dict):
assert dock_name in self.docks.keys(), f"Dock name '{dock_name}' for variable {variable} not understood. Dock name must be in {list(self.docks.keys())}"
# all variables have an equation
for variable in self.variables.keys():
assert 'equation' in self.variables[variable].keys(), f"An equation for variable {variable} must be provided"
########################### END Assertions input file ###########################
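        # Note on the loaded equations: given how the kernels below call them, a variable
        # equation defined in the input file is expected to look like (hedged sketch, the
        # names are placeholders):
        #     def diff_eq_x(self, variables, params):
        #         return params['growth_rate'] * variables['x']
        # i.e. it receives the model instance, the dict of current variable values and the
        # dict of current parameter values, and returns the time derivative of that variable.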
def simulator(self):
""" Calculate 1 time step and update arrays """
        # Actual computation (pass only the 'value' keyword of each sub-dictionary)
self.computation_result_dict = self.kernels[self.kernel]['value']({key:value['value'][-1] for (key,value) in self.variables.items() if not value['observable']},{key:value['value'][-1] for (key,value) in self.params.items()}) # use last value of all variables for the computations of next step
# Update last values to the newest calculated
for variable in self.variables.keys():
if not self.variables[variable]['observable']:
# Simpler concatenate replacing directly indices
self.variables[variable]['value'][:-1] = self.variables[variable]['value'][1:]
self.variables[variable]['value'][-1] = self.computation_result_dict[variable]
# Evaluate observables
self.update_observables()
def update_observables(self):
for variable in self.variables.keys():
if self.variables[variable]['observable']:
self.obs_computation_result = self.variables[variable]['equation'](self,{key:value['value'] for (key,value) in self.variables.items()},{key:value['value'][-1] for (key,value) in self.params.items()})
if 'calculation_size' in self.variables[variable].keys() and self.variables[variable]['calculation_size']:
self.variables[variable]['value'] = self.obs_computation_result
else:
try: index = len(self.obs_computation_result)
except TypeError: index = 1 # If return only a single value
self.variables[variable]['value'][:-index] = self.variables[variable]['value'][index:]
self.variables[variable]['value'][-index:] = self.obs_computation_result
def kernel_euler(self, variables, params):
""" N variables Euler algorithm (A = A + dt * eq_A(params)) """
new_variables = {}
for variable_name in variables.keys():
new_variables[variable_name] = variables[variable_name] + self.step_size * self.variables[variable_name]['equation'](self,variables,params)
return new_variables
def kernel_RK4(self, variables, params):
""" N variables RK4 algorithm """
temp_variables = variables.copy()
# Loop for each coefficient on all equations
coefs_1 = {}
for variable_name in variables.keys():
coefs_1[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
coefs_2 = {}
for variable_name in variables.keys(): # evaluate variables first
temp_variables[variable_name] = variables[variable_name] + (self.step_size/2.)*coefs_1[variable_name]
for variable_name in variables.keys():
coefs_2[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
coefs_3 = {}
for variable_name in variables.keys():
temp_variables[variable_name] = variables[variable_name] + (self.step_size/2.)*coefs_2[variable_name]
for variable_name in variables.keys():
coefs_3[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
coefs_4 = {}
for variable_name in variables.keys():
temp_variables[variable_name] = variables[variable_name] + self.step_size*coefs_3[variable_name]
for variable_name in variables.keys():
coefs_4[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
new_variables = {}
for variable_name in variables.keys():
new_variables[variable_name] = variables[variable_name] + (self.step_size/6.)*(coefs_1[variable_name]+2*coefs_2[variable_name]+2*coefs_3[variable_name]+coefs_4[variable_name])
return new_variables
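# For reference, the two built-in kernels above implement, for each variable y with
# right-hand side f = equation(variables, params) and step size h = self.step_size:
#   Euler: y_{n+1} = y_n + h*f(y_n)
#   RK4:   k1 = f(y_n); k2 = f(y_n + h/2*k1); k3 = f(y_n + h/2*k2); k4 = f(y_n + h*k3)
#          y_{n+1} = y_n + (h/6)*(k1 + 2*k2 + 2*k3 + k4)
# A user-supplied kernel from the input file (any function named 'kernel_<name>') appears to be
# called with the same signature, (variables_dict, params_dict), and must return a dict of the
# updated variable values keyed by variable name.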
### BEGIN MainWindow class ###
class MainWindow(TemplateBaseClass,Modele):
def __init__(self):
# Extra useful attributes
self.fps = None
self.lastTime = perf_counter()
self.colors_dict = {'b':{'rgb':(31,119,180),'hex':'#1f77b4'},'o':{'rgb':(255,127,14),'hex':'#ff7f0e'},'g':{'rgb':(44,160,44),'hex':'#2ca02c'},'r':{'rgb':(214,39,40),'hex':'#d62728'},'p':{'rgb':(148,103,189),'hex':'#9467bd'},'y':{'rgb':(255,255,0),'hex':'#ffff00'},'brown':{'rgb':(140,86,75),'hex':'#8c564bq'},'pink':{'rgb':(227,119,194),'hex':'#e377c2'},'grey':{'rgb':(127,127,127),'hex':'#7f7f7f'},'c':{'rgb':(23,190,207),'hex':'#7f7f7f'}}
self.flag_colormaps = 1
self.colormaps_list = ['thermal','yellowy','greyclip','grey','viridis','inferno']
# Create variables and parameters
#Modele.__init__(self) # Commented as called by TemplateBaseClass.__init__(self)
# Load UI
        TemplateBaseClass.__init__(self) # This seems to call Modele.__init__(self) => commenting out the first occurrence
self.setWindowTitle('Graphical User Interface for Differential Equations (GUIDE)')
# Create the main window
self.ui = WindowTemplate()
self.ui.setupUi(self)
try: self.resize(*self.window_size)
except: pass
# Set main theme from self.window_params['theme']
if 'theme' in self.__dict__.keys() and self.theme == 'dark':
QtGui.QApplication.setStyle("Fusion")
self.palette = self.palette()
self.palette.setColor(QtGui.QPalette.Window, QtGui.QColor(53, 53, 53))
self.palette.setColor(QtGui.QPalette.WindowText, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.Base, QtGui.QColor(25, 25, 25))
self.palette.setColor(QtGui.QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
self.palette.setColor(QtGui.QPalette.ToolTipBase, QtCore.Qt.black)
self.palette.setColor(QtGui.QPalette.ToolTipText, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.Text, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))
self.palette.setColor(QtGui.QPalette.ButtonText, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.BrightText, QtCore.Qt.red)
self.palette.setColor(QtGui.QPalette.Link, QtGui.QColor(42, 130, 218))
self.palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(42, 130, 218))
self.palette.setColor(QtGui.QPalette.HighlightedText, QtCore.Qt.black)
self.setPalette(self.palette)
# Button, sliders and spinboxes drawn in qtdesigner
#ICs_button
self.ui.ICs_button.clicked.connect(self.update_ICs_button)
self.ui.ICs_button.keyPressEvent = self.keyPressEvent
#nstep_slider
self.ui.nstep_slider.setRange(1,int(self.array_size/10))
self.ui.nstep_slider.setValue(self.nstep_update_plot)
self.ui.nstep_slider.valueChanged.connect(self.update_nstep_slider)
#nstep_spinbox
self.ui.nstep_spinbox.setRange(1,int(self.array_size/10))
self.ui.nstep_spinbox.setSingleStep(1)
self.ui.nstep_spinbox.setValue(self.nstep_update_plot)
self.ui.nstep_spinbox.setKeyboardTracking(False) # emit signal only when enter is pressed
self.ui.nstep_spinbox.valueChanged.connect(self.update_nstep_spinbox)
#fps_label
self.update_fps_label()
#record_label
self.ui.record_label.setText(' Rec. ')
########################## BEGIN figure layout and docks ##########################
# Dock declaration and initial placement
self.main_dock_area = self.ui.dock_area
for dock_name in self.docks.keys():
self.add_dock(dock_name) # add 'dock' and 'region' keywords into self.docks[dock_name]
# Declaration of the plots in respective docks
accepted_dock_types = ['plot1D','plot2D','image']
assert self.docks[dock_name]['type'] in accepted_dock_types, f"Dock type '{self.docks[dock_name]['type']}' not understood. Dock type must be in {accepted_dock_types}"
flag2 = 0
alpha_factor_linearregion = 60 # 0 -> 255
self.warning_observables_docks = []
for dock_name in self.docks.keys():
if self.docks[dock_name]['type'] == 'plot1D':
self.create_PlotWidget(dock_name) # add 'actual_plot' keyword into self.docks[dock_name]
# Attribution of the curves to the plots
flag = 0
self.docks[dock_name]['curve'] = {}
# Create curves objects for variables, observables and params
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
if dock_name in self.variables[variable]['dock']:
self.docks[dock_name]['curve'][variable] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
else:
self.docks[dock_name]['curve'][variable] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
flag += 1
for param in self.params.keys():
if 'dock' in self.params[param].keys():
if dock_name in self.params[param]['dock']:
self.docks[dock_name]['curve'][param] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
else:
self.docks[dock_name]['curve'][param] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
flag += 1
if 'zoomOf' in self.docks[dock_name].keys():
relatedTo = self.docks[dock_name]['zoomOf']
# Create region and store in its according plot dict
self.docks[relatedTo]['region'][dock_name] = pg.LinearRegionItem([self.array_size/2.-self.array_size/30.,self.array_size/2.+self.array_size/30.],brush=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag2,len(self.colors_dict))]]['rgb']+(alpha_factor_linearregion,))
self.docks[relatedTo]['region'][dock_name].setZValue(-10)
self.docks[relatedTo]['actual_plot'].addItem(self.docks[relatedTo]['region'][dock_name])
self.docks[relatedTo]['region'][dock_name].sigRegionChanged.connect(partial(self.update_zoom_plot,dock_name,relatedTo))
# Link region and zoom plot
self.docks[dock_name]['actual_plot'].sigXRangeChanged.connect(partial(self.update_xzoom_region,dock_name,relatedTo))
flag2 += 1
### WARNING Does not work probably due to an internal bug (waiting for answer)
#print('1',self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[1])
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
#self.docks[dock_name]['actual_plot'].setYLink(self.docks[relatedTo]['actual_plot'])
#print('1',self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[1])
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
self.update_zoom_plot(dock_name,relatedTo)
elif self.docks[dock_name]['type'] == 'plot2D':
self.create_PlotWidget(dock_name)
# Attribution of the curves to the plots
flag = 0
self.docks[dock_name]['curve'] = {}
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
# if element of 'dock' (variables/observables) is a dict
for element_variable_dock in self.variables[variable]['dock']:
if isinstance(element_variable_dock,dict):
if dock_name in element_variable_dock.keys():
for real_dock_name in element_variable_dock.keys():
# assert only two variables to plot
assert len(element_variable_dock[real_dock_name]) == 2, f"list of variables/observables to plot on {real_dock_name} with dock type 'plot2D' must be exactly of length 2, provided was {len(element_variable_dock[real_dock_name])}"
list_variables_to_plot = element_variable_dock[real_dock_name]
# assert variables provided do exist
for variables_to_plot in list_variables_to_plot:
assert variables_to_plot in self.variables.keys() or variables_to_plot in self.params.keys(),f"variable '{variables_to_plot}' in 'dock' key of variable '{variable}' (variables/observables/params dictionnary) not understood. Must be in {list(dict(self.variables, **self.params).keys())}"
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)] = {}
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)]['curve'] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)]['variables_to_plot'] = list_variables_to_plot
flag += 1
else:
# Check validity of the provided dock_names
for real_dock_name in element_variable_dock.keys():
if real_dock_name not in self.docks.keys():
if [variable,element_variable_dock] not in self.warning_observables_docks:
self.warning_observables_docks.append([variable,element_variable_dock]) # to throw error only once
print(f"WARNING: check validity of dock_names you provided in the variables/observable dictionnary: {list(element_variable_dock.keys())}'")
if flag == 0: # Nothing plotted on the 'plot2D'
print(f"WARNING: nothing has been plotted on the 'plot2D' dock with name '{dock_name}'")
if 'zoomOf' in self.docks[dock_name].keys():
pass
elif self.docks[dock_name]['type'] == 'image':
self.create_ImageView(dock_name)
self.docks[dock_name]['actual_plot'].keyPressEvent = self.keyPressEvent
#self.docks[dock_name]['actual_plot'].enableAutoRange('xy', True)
########################## END figure layout and docks ##########################
############################ BEGIN Trees declaration ############################
# Variables Tree
self.tree = self.ui.tree
self.tree.setColumnCount(3)
self.tree.keyPressEvent = self.keyPressEvent # allow keys catching for focus on trees
self.tree.setHeaderLabels(['Variables','IC','plot'])
flag = 0
for variable in self.variables.keys():
temp = pg.TreeWidgetItem([variable])
temp.setForeground(0,QtGui.QBrush(QtGui.QColor(self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['hex'])))
# Create linedit (variables only)
if not self.variables[variable]['observable']:
self.variables[variable]['lineedit'] = QtGui.QLineEdit()
temp.setWidget(1, self.variables[variable]['lineedit'])
self.variables[variable]['lineedit'].setText(str(self.variables[variable]['value'][-1])) # set initial value
self.variables[variable]['lineedit'].returnPressed.connect(partial(self.update_lineedit_variable,variable))
# Create checkbox
self.variables[variable]['checkbox'] = QtGui.QCheckBox()
temp.setWidget(2, self.variables[variable]['checkbox'])
self.tree.addTopLevelItem(temp)
self.variables[variable]['checkbox'].setChecked(self.variables[variable]['plot']) # set initial state
self.variables[variable]['checkbox'].keyPressEvent = self.keyPressEvent # connect keys
self.variables[variable]['checkbox'].stateChanged.connect(partial(self.update_checkbox_variable,variable)) # connect checkbox
flag += 1
# Params Tree
self.tree_params = self.ui.tree_params
self.tree_params.setColumnCount(4)
self.tree_params.keyPressEvent = self.keyPressEvent
self.tree_params.setHeaderLabels(['Params','plot','value','slider'])
self.spinbox_precision = 3
for param in self.params.keys():
self.params[param]['slider_conversion_factor'] = int(1./self.params[param]['step']) # To test was: 5000 *10000
temp = pg.TreeWidgetItem([param])
# Spin boxes
self.params[param]['spinbox'] = QtGui.QDoubleSpinBox()
self.params[param]['spinbox'].setRange(self.params[param]['min'],self.params[param]['max'])
self.params[param]['spinbox'].setSingleStep(self.params[param]['step'])
if isinstance(self.params[param]['step'],int):
self.params[param]['spinbox'].setDecimals(0)
else:
self.params[param]['spinbox'].setDecimals(self.spinbox_precision)
temp.setWidget(2, self.params[param]['spinbox'])
self.tree_params.addTopLevelItem(temp)
self.params[param]['spinbox'].setValue(self.params[param]['value'][-1])
self.params[param]['spinbox'].setKeyboardTracking(False) # emit signal only when enter is pressed
self.params[param]['spinbox'].valueChanged.connect(partial(self.update_slider_params,param))
# Sliders
self.params[param]['slider'] = QtGui.QSlider()
self.params[param]['slider'].setRange(int(self.params[param]['min']*self.params[param]['slider_conversion_factor']),int(self.params[param]['max']*self.params[param]['slider_conversion_factor']))
self.params[param]['slider'].setSingleStep(1) # integers only
self.params[param]['slider'].setOrientation(QtCore.Qt.Orientation.Horizontal) # horizontal slider
temp.setWidget(3, self.params[param]['slider'])
value = np.round(self.params[param]['value'][-1]*self.params[param]['slider_conversion_factor'],self.spinbox_precision) # convert in slider integer unit
self.params[param]['slider'].setValue(int(value))
self.params[param]['slider'].valueChanged.connect(partial(self.update_spinbox_params,param))
# Create checkbox
self.params[param]['checkbox'] = QtGui.QCheckBox()
temp.setWidget(1, self.params[param]['checkbox'])
self.params[param]['checkbox'].setChecked(self.params[param]['plot']) # set initial state
self.params[param]['checkbox'].keyPressEvent = self.keyPressEvent # connect keys
self.params[param]['checkbox'].stateChanged.connect(partial(self.update_checkbox_variable,param)) # connect checkbox
flag += 1
# Kernel Tree
self.tree_kernels = self.ui.tree_kernels
self.tree_kernels.setColumnCount(2)
self.tree_kernels.keyPressEvent = self.keyPressEvent
self.tree_kernels.setHeaderLabels(['Kernels',''])
# Create a group of buttons to allow "exclusive" behavior
self.group_buttons_kernels = QtGui.QButtonGroup()
self.group_buttons_kernels.setExclusive(True)
for kernel in self.kernels.keys():
self.kernels[kernel]['checkbox'] = QtGui.QCheckBox()
self.group_buttons_kernels.addButton(self.kernels[kernel]['checkbox'], 1)
temp = pg.TreeWidgetItem([kernel])
temp.setWidget(1, self.kernels[kernel]['checkbox'])
self.tree_kernels.addTopLevelItem(temp)
if kernel == self.kernel:
self.kernels[kernel]['checkbox'].setChecked(True) # set initial state
self.kernels[kernel]['checkbox'].keyPressEvent = self.keyPressEvent
self.group_buttons_kernels.buttonClicked.connect(self.update_checkbox_kernel)
############################# END Trees declaration ############################
# Start showing the window
self.show()
# Connect timer to update the figure
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.run_simulator)
self.timer.start(10)
# Initial window states
if not self.streaming: self.timer.stop(); self.run_simulator()
self.update_pause_indicator()
self.update_record_state_indicator()
# If starts recording from beginning
if self.record_state:
self.toggle_record_state()
self.keyPressEvent("r")
self.t = 0
################################ BEGIN plots update ###################################
def update_zoom_plot(self,dock_name,relatedTo):
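# Called when the parent plot's region selector moves: restrict the zoom dock's x-range to the selected region.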
self.docks[dock_name]['actual_plot'].setXRange(*self.docks[relatedTo]['region'][dock_name].getRegion(), padding=0)
def update_xzoom_region(self,dock_name,relatedTo):
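# Inverse direction: when the zoomed plot itself is panned/zoomed, move the region shown on the parent plot to match its visible x-range.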
#print('1',self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[1])
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
self.docks[relatedTo]['region'][dock_name].setRegion(self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[0])
def update_plots(self):
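# Refresh every dock from the rolling buffers: 'plot1D' docks redraw checked variables/params (optionally restricted by their 'dock' list),
# 'plot2D' docks redraw the variable/param pairs declared for each curve, and 'image' docks display the variable assigned to them.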
for dock_name in self.docks.keys():
if self.docks[dock_name]['type'] == 'plot1D':
for variable in self.variables.keys():
if self.variables[variable]['plot']:
if 'dock' in self.variables[variable].keys():
if dock_name in self.variables[variable]['dock']:
self.docks[dock_name]['curve'][variable].setData(self.variables[variable]['value'])
else:
self.docks[dock_name]['curve'][variable].setData(self.variables[variable]['value'])
for param in self.params.keys():
if self.params[param]['plot']:
if 'dock' in self.params[param].keys():
if dock_name in self.params[param]['dock']:
self.docks[dock_name]['curve'][param].setData(self.params[param]['value'])
else:
self.docks[dock_name]['curve'][param].setData(self.params[param]['value'])
elif self.docks[dock_name]['type'] == 'plot2D':
# plot the variable names that are pre stored in dock dict
for curve2D in self.docks[dock_name]['curve']:
# if there is a param in the list
list_params_in_variables_provided = [i for i in self.docks[dock_name]['curve'][curve2D]['variables_to_plot'] if i in list(self.params.keys())]
if len(list_params_in_variables_provided)==1:
param_provided = list_params_in_variables_provided[0]
index_param_provided = self.docks[dock_name]['curve'][curve2D]['variables_to_plot'].index(param_provided)
index_variable_provided = list(set([0,1]) - set([index_param_provided]))
if self.variables[curve2D.split('_plot2D_')[0]]['plot']:
if index_param_provided == 0:
self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.params[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
elif index_param_provided == 1:
self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.params[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
# no params provided
else:
# if variables specified, index 0 is to be plot
if self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['plot']:
self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
elif self.docks[dock_name]['type'] == 'image':
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
if self.variables[variable]['plot']:
if dock_name in self.variables[variable]['dock']:
self.docks[dock_name]['actual_plot'].setImage(self.variables[variable]['value'])
# Update fps_label
self.update_fps_label()
def run_simulator(self,nstep_update_plot=None):
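# Advance the simulation by nstep_update_plot steps per timer tick; optionally record every nstep_record steps,
# redraw on the last step of the loop, then shift the time-stamp and parameter history buffers by one.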
if not nstep_update_plot: nstep_update_plot = self.nstep_update_plot
# Calculation
for i in range(nstep_update_plot):
self.simulator()
# If recording
if self.record_state and (self.nstep%self.nstep_record == 0): # record every self.nstep_record
self.append_to_dataframe()
# Update main plots every nstep_update_plot (last occurence of the loop)
if i==nstep_update_plot-1:
self.update_plots()
# Update time_stamp and parameter dict last (then saved correspond to calculation)
self.time_stamp[:-1] = self.time_stamp[1:]
self.time_stamp[-1] += self.step_size
self.nstep += 1
for param in self.params.keys():
self.params[param]['value'][:-1] = self.params[param]['value'][1:]
# Fix app freezing on Windows systems (if event occurs must process it)
QtCore.QCoreApplication.processEvents()
################################# END plots update ###################################
def keyPressEvent(self, event):
""" Set keyboard interactions """
try: key = event.text()
except AttributeError: key = event # allow calling keys programmatically (a plain string can be passed in place of a QKeyEvent)
if key in list(self.user_defined_keyPressEvent.keys()): # Interprete keys defined user file
self.user_defined_keyPressEvent[key](self,{key:value['value'] for (key,value) in self.variables.items()},{key:value['value'][-1] for (key,value) in self.params.items()})
elif key == ' ':
self.toggle_streaming()
elif key == 'q':
sys.exit()
elif key == 'h':
previous_streaming_state = self.streaming
if previous_streaming_state: self.toggle_streaming()
self.display_help()
if previous_streaming_state: self.toggle_streaming()
elif key == 's' or key == 'r':
previous_streaming_state = self.streaming
if previous_streaming_state: self.toggle_streaming() # pause it
if key=='s': self.save() # query filename and save initial screenshot
elif key=='r':
if not self.record_state:
self.save(record=True)
else:
self.toggle_record_state()
self.save_screenshot(self.filename_to_record_no_ext+'_END.png')
self.save_appended_dataframe()
self.filename_to_record_no_ext = None
if previous_streaming_state: self.toggle_streaming()
elif key == 'i':
self.change_ICs_variable()
elif key == 'c':
self.update_images_colormap()
else:
if key != "" and event.key() != QtCore.Qt.Key_Return:
print(f'Keyboard key "{key}" is not bound to any action')
def create_PlotWidget(self,dock_name):
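# Build the pyqtgraph PlotWidget for this dock; any dock-dict keys that are not layout-related (e.g. labels, title) are passed straight to pg.PlotWidget.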
self.docks[dock_name]['actual_plot'] = pg.PlotWidget(**{key:value for key,value in self.docks[dock_name].items() if key not in ['dock','type','position','relativeTo','size','zoomOf','region']})
self.docks[dock_name]['dock'].addWidget(self.docks[dock_name]['actual_plot'])
def create_ImageView(self,dock_name):
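# Build an ImageView backed by a PlotItem (so axes are visible) and a row-major ImageItem;
# leftover dock-dict keys are forwarded to pg.ImageView and the current colormap is applied.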
# Item for displaying image data
pl = pg.PlotItem() # to get axis
img = pg.ImageItem(axisOrder='row-major') # row-major so arrays are not displayed transposed/rotated by 90 degrees
# Create an ImageView Widget
self.docks[dock_name]['actual_plot'] = pg.ImageView(view=pl,imageItem=img,**{key:value for key,value in self.docks[dock_name].items() if key not in ['dock','type','position','relativeTo','size','zoomOf','region']})
# Set initial states
self.docks[dock_name]['actual_plot'].view.invertY(False)
self.docks[dock_name]['actual_plot'].view.setAspectLocked(False)
self.docks[dock_name]['actual_plot'].view.disableAutoRange(True)
self.docks[dock_name]['actual_plot'].ui.menuBtn.hide()
#self.docks[dock_name]['actual_plot'].ui.menuBtn.show()
#self.docks[dock_name]['actual_plot'].ui.histogram.hide()
#self.docks[dock_name]['actual_plot'].ui.roiBtn.hide()
# Set colormap to be used
gradient = Gradients[self.colormaps_list[self.flag_colormaps]]
cmap = pg.ColorMap(pos=[c[0] for c in gradient['ticks']],color=[c[1] for c in gradient['ticks']], mode=gradient['mode'])
self.docks[dock_name]['actual_plot'].setColorMap(cmap)
self.docks[dock_name]['dock'].addWidget(self.docks[dock_name]['actual_plot'])
def add_dock(self,dock_name):
''' Add a dock to the main window '''
if 'relativeTo' in self.docks[dock_name].keys():
relativeto_dock_name = self.docks[dock_name]['relativeTo']
assert 'dock' in self.docks[relativeto_dock_name].keys(), f"Dock '{relativeto_dock_name}' not understood. Docks that are 'relativeTo' another must be defined after it in the dictionary of docks for consistent behavior"
self.docks[dock_name]['region'] = {} # 'region' key to be used later
self.docks[dock_name]['dock'] = Dock(dock_name, size=self.docks[dock_name]['size'], closable=True)
self.main_dock_area.addDock(**{key:value for key,value in self.docks[dock_name].items() if key in ['dock','position','relativeTo']}) # key used: 'dock', 'position' and 'relativeTo'
def repaint_all_plots(self):
for dock_name in self.docks.keys():
if 'actual_plot' in self.docks[dock_name]:
self.docks[dock_name]['actual_plot'].repaint()
def toggle_streaming(self):
self.streaming = not(self.streaming)
self.update_pause_indicator()
def update_pause_indicator(self):
if self.streaming:
self.ui.run_label.setStyleSheet("QLabel {border: 3px solid %s; background-color : %s; color : %s; }" %('#000000',self.colors_dict['g']['hex'],'#000000'))
self.ui.run_label.setText(' Run ')
self.timer.start(10)
else:
self.ui.run_label.setStyleSheet("QLabel {border: 3px solid %s; background-color : %s; color : %s; }" %('#000000',self.colors_dict['r']['hex'],'#000000'))
self.ui.run_label.setText(' Stop ')
self.timer.stop()
self.ui.run_label.repaint()
def update_images_colormap(self):
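# Cycle to the next colormap in colormaps_list and apply it to every 'image' dock.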
self.flag_colormaps += 1
cmap_name = self.colormaps_list[np.mod(self.flag_colormaps,len(self.colormaps_list))]
gradient = Gradients[cmap_name]
cmap = pg.ColorMap(pos=[c[0] for c in gradient['ticks']],color=[c[1] for c in gradient['ticks']], mode=gradient['mode'])
for dock_name in self.docks.keys():
if self.docks[dock_name]['type'] == 'image':
if 'actual_plot' in self.docks[dock_name]:
self.docks[dock_name]['actual_plot'].setColorMap(cmap)
self.repaint_all_plots()
def update_record_state_indicator(self):
if self.record_state:
self.ui.record_label.setStyleSheet("border: 3px solid %s; border-radius: 22px; background-color : %s; color : %s" %('#000000',self.colors_dict['r']['hex'],'#000000'))
else:
self.ui.record_label.setStyleSheet("border: 3px solid %s; border-radius: 22px; background-color : %s; color : %s" %('#000000','#000000','#000000'))
self.ui.record_label.repaint()
def update_ICs_button(self):
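# Reset every non-observable variable to its declared initial condition, both in its line edit and in its value buffer.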
for variable in self.variables.keys():
if not self.variables[variable]['observable']:
value = np.array(self.variables[variable]['init_cond']).astype(self.variables[variable]['type']) # convert to array to be able to astype
self.variables[variable]['lineedit'].setText(str(value)) # set initial value
self.variables[variable]['value'] = self.variables[variable]['init_cond'] * np.ones(self.array_size).astype(self.variables[variable]['type'])
def display_help(self):
# Message must be a list of each line to display
text_help_dialog = ['Important Notes:','- (keyboard keys) do not work when focus is given to lineedits or spinboxes','- ("image" plots) you must pause to modify the aspect ratio, zoom or histogram range']
text_help_dialog += ['']
text_help_dialog += ['Usable keyboard keys:','- " ": toggle run/stop','- "q": close the window','- "h": display this help message','- "s": save a snapshot and a dataframe','- "r": toggle recording, save snapshots at start/end','- "i": apply all variables ICs','- "c": change the colormap used to draw "image" plots']
text_help_dialog += ['']
text_help_dialog += ['Defined variables and observables:']
for variable in self.variables.keys():
temp = '- "'
temp += variable+'"'
if self.variables[variable]['observable']: temp += ' (observable)'
elif not self.variables[variable]['observable']: temp += ' (variable)'
if 'help' in self.variables[variable].keys(): temp += f": {self.variables[variable]['help']}"
text_help_dialog += [temp]
text_help_dialog += ['']
text_help_dialog += ['Defined parameters:']
for param in self.params.keys():
temp = '- "'
temp += param+'"'
if 'help' in self.params[param].keys(): temp += f", help: {self.params[param]['help']}"
for key in self.params[param].keys():
if key in ['min','max','step','value']:
if key=='value':
temp += f", {key}: {self.params[param][key][-1]}"
else:
temp += f", {key}: {self.params[param][key]}"
text_help_dialog += [temp]
help_dialog = ScrollMessageBox(text_help_dialog,size_help=(850,600))
help_dialog.setWindowTitle('Help message')
help_dialog.exec_()
################################# BEGIN save ###################################
def save(self,record=False,filename_to_save_no_ext=None):
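# Ask for a base filename (unless one is supplied), confirm overwrites, then save a screenshot and an .xlsx of the
# current buffers; with record=True this also initialises the recording list and arms the record state.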
self.filename_to_save_no_ext = filename_to_save_no_ext
if self.filename_to_save_no_ext is None:
save_dialog = QtGui.QFileDialog()
save_dialog.setFileMode(QtGui.QFileDialog.AnyFile)
save_dialog.setNameFilter("Output files (*.png *.xlsx)")
save_dialog.setWindowTitle("Saving files: screenshot, traces and window state")
if save_dialog.exec_():
filename_provided = save_dialog.selectedFiles()[0]
if '.' in filename_provided:
self.filename_to_save_no_ext = filename_provided.rsplit('.', 1)[0] # drop any extension the user typed
else:
self.filename_to_save_no_ext = filename_provided
# Build a dict of the existing conflicting files
existing_filename_dict = {}
for filename in [self.filename_to_save_no_ext+'.png',self.filename_to_save_no_ext+'.xlsx']:
if os.path.exists(filename):
existing_filename_dict[filename] = {}
existing_filename_dict[filename]['name'] = filename.split("/")[-1]
existing_filename_dict[filename]['path'] = filename[:-len(existing_filename_dict[filename]['name'])] # directory part, keeping the trailing "/"
# Open a confirmation window if filename_provided exists
if len(existing_filename_dict) > 0:
file_exists_dialog = QtGui.QMessageBox()
file_exists_dialog.setIcon(QtGui.QMessageBox.Warning)
file_exists_dialog.setWindowTitle('Warning: file already exists')
names = '" and "'.join([existing_filename_dict[key]['name'] for key in existing_filename_dict.keys()])
path = existing_filename_dict[list(existing_filename_dict.keys())[0]]['path']
if len(existing_filename_dict) > 1: extra_text = ['s','','them','them','their']
elif len(existing_filename_dict) == 1: extra_text = ['','s','it','it','its']
file_exists_dialog.setText(f'File{extra_text[0]} named "{names}" already exist{extra_text[1]} at location "{path}". Do you want to replace {extra_text[2]}?')
file_exists_dialog.setInformativeText(f'Replacing {extra_text[3]} will overwrite {extra_text[4]} contents forever.')
file_exists_dialog.setStandardButtons(QtGui.QMessageBox.Save|QtGui.QMessageBox.Cancel)
file_exists_dialog.setDefaultButton(QtGui.QMessageBox.Cancel)
file_exists_dialog.buttonClicked.connect(self.overwrite_buttons)
file_exists_dialog.exec_()
save_dialog.close()
# if closing the window or chose not to overwrite => no filename
if self.filename_to_save_no_ext is None: return
# save screenshot
time.sleep(0.05) # wait for save_dialog to close before the snapshot
add_text = '_START' if record else ''
self.save_screenshot(self.filename_to_save_no_ext+f"{add_text}.png")
# save dataframe with variables, observables and parameter values
self.save_dataframe(self.filename_to_save_no_ext+'.xlsx')
if record:
self.list_to_record = []
self.filename_to_record_no_ext = self.filename_to_save_no_ext
self.toggle_record_state()
def overwrite_buttons(self,event):
button_pressed = event.text()
if button_pressed == 'Cancel':
self.filename_to_save_no_ext = None
elif button_pressed == 'Save':
return
def toggle_record_state(self):
self.record_state = not(self.record_state)
self.update_record_state_indicator()
def save_screenshot(self,filename):
""" Save a screenshot of the main_splitter (the whole "main" window) """
screen = QtWidgets.QApplication.primaryScreen()
screenshot = screen.grabWindow( self.ui.main_splitter.winId() )
screenshot.save(filename, 'png')
print(f'File "{filename}" saved')
def save_dataframe(self,filename):
data_frame = self.build_dataframe_to_save()
data_frame.to_excel(filename,index=False)
print(f'File "{filename}" saved')
def save_appended_dataframe(self,sheet_name='Sheet1'):
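# Append the recorded rows below the existing sheet. Note: assigning writer.book / writer.sheets is the older
# openpyxl append pattern; newer pandas versions may require ExcelWriter(..., mode='a', if_sheet_exists='overlay') instead.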
writer = pd.ExcelWriter(self.filename_to_record_no_ext+'.xlsx', engine='openpyxl')
writer.book = load_workbook(self.filename_to_record_no_ext+'.xlsx')
startrow = writer.book[sheet_name].max_row
writer.sheets = {ws.title: ws for ws in writer.book.worksheets}
self.data_frame_to_record = | pd.DataFrame(self.list_to_record) | pandas.DataFrame |
# import library
import pandas as pd
import requests
from bs4 import BeautifulSoup
# pandas display options so that full dataframes are printed to the terminal while debugging
| pd.set_option('display.max_columns', None) | pandas.set_option |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
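# Return obj with its elements randomly shuffled (used to build out-of-order indexes for the set-operation tests).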
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assertRaisesRegexp(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with | tm.assertRaises(period.IncompatibleFrequency) | pandas.util.testing.assertRaises |
import pandas as pd
crime_csvs = [
]
def load_and_reshape_police(filename):
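# Keep only the "Police officers per 100,000 population" rows and pivot to a Year x Province table.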
df = pd.read_csv(filename, usecols=['REF_DATE', 'GEO', 'Statistics', 'VALUE'])
index = (df["Statistics"] == "Police officers per 100,000 population")
df = df[index]
df = df.pivot(index='REF_DATE', columns='GEO', values='VALUE')
df.index.rename('Year', inplace=True)
return df
police_dfs = [load_and_reshape_police(f) for f in [
'data/3510007601_databaseLoadingData_NB.csv',
'data/3510007601_databaseLoadingData_NL.csv',
'data/3510007601_databaseLoadingData_NS.csv',
'data/3510007601_databaseLoadingData_ON.csv',
'data/3510007601_databaseLoadingData_PE.csv',
'data/3510007601_databaseLoadingData_QC.csv']]
police = pd.concat(police_dfs, axis=1)
def load_and_reshape_crime(filename):
df = | pd.read_csv(filename, usecols=['REF_DATE', 'GEO', 'Statistics', 'VALUE']) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 10:07:15 2018
@author: Pool
"""
import pandas as pd
import pickle
import matplotlib.pyplot as plt
# one data point recorded every 500 iterations; 500 points = 250,000 iterations per run, four runs = 1,000,000 iterations in total
p1 = pickle.load(open('case201_pickle.dat', 'rb'))
p2 = pickle.load(open('case202_pickle.dat', 'rb'))
p3 = pickle.load(open('case203_pickle.dat', 'rb'))
p4 = pickle.load(open('case204_pickle.dat', 'rb'))
p5 = pickle.load(open('case205_pickle.dat', 'rb'))
p6 = pickle.load(open('case211_pickle.dat', 'rb'))
p7 = pickle.load(open('case212_pickle.dat', 'rb'))
p8 = pickle.load(open('case213_pickle.dat', 'rb'))
p9 = pickle.load(open('case214_pickle.dat', 'rb'))
p10 = pickle.load(open('case215_pickle.dat', 'rb'))
n = 500
fig1 = plt.figure(figsize=(15, 8))
plt.plot(p1[:n], 'o', linewidth=3, label='case-1')
plt.plot(p2[:n], 'o', linewidth=3, label='case-2')
plt.plot(p3[:n], 'o', linewidth=3, label='case-3')
plt.plot(p4[:n], 'o', linewidth=3, label='case-4')
plt.plot(p5[:n], 'o', linewidth=3, label='case-5')
plt.plot(p6[:n], 'o', linewidth=3, label='case-6')
plt.plot(p7[:n], 'o', linewidth=3, label='case-7')
plt.plot(p8[:n], 'o', linewidth=3, label='case-8')
plt.plot(p9[:n], 'o', linewidth=3, label='case-9')
plt.plot(p10[:n], 'o', linewidth=3, label='case-10')
plt.legend()
#def chg(lst):
# for n in range(len(lst)):
# lst[n] = lst[n]*(n+1)
# for n in range(len(lst)-1, 0, -1):
# lst[n] = lst[n]-lst[n-1]
# return lst
#chg(p1)
#chg(p2)
#chg(p3)
#chg(p4)
#chg(p5)
lst1 = []
lst1.extend(p1)
lst1.extend(p2)
lst1.extend(p3)
lst1.extend(p4)
lst1.extend(p5)
lst2 = []
lst2.extend(p6)
lst2.extend(p7)
lst2.extend(p8)
lst2.extend(p9)
lst2.extend(p10)
lst =[]
lst.extend(lst1)
lst.extend(lst2)
#del p1, p2, p3, p4, p5
lst = pd.Series(lst, name='run')
lst = lst/20
lst1 = pd.Series(lst1, name='run')
lst1 = lst1/20
lst2 = pd.Series(lst2, name='run')
lst2 = lst2/20
#lsst = [p1,p2,p3,p4,p5,p6,p7,p8,p9,p10]
#for n in lsst:
# n = pd.Series(n, name='run')
#for n in lsst:
# n = n/20
#for n in lsst:
# print(n.describe())
p1 = pd.Series(p1, name='run')
p2 = | pd.Series(p2, name='run') | pandas.Series |
from IPython.core.interactiveshell import InteractiveShell
from wordcloud import WordCloud, STOPWORDS
import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pingouin as pg
from loguru import logger
from GEN_Utils import FileHandling
from GEN_Utils.HDF5_Utils import hdf_to_dict
logger.info('Import OK')
input_path = 'analysis_results/scival_test/ten_year_metrics_summary.xlsx'
output_folder = 'analysis_results/stats_metrics/'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Print all lone variables during execution
InteractiveShell.ast_node_interactivity = 'all'
# Set plotting backgrounds to white
# matplotlib.rcParams.update(_VSCode_defaultMatplotlib_Params)  # VSCode interactive-window default; undefined outside that environment
plt.rcParams.update({'figure.facecolor': (1, 1, 1, 1)})
metrics = pd.read_excel(input_path)
metrics.head(100)
# in any case where values were not read properly, discard
def value_checker(value):
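# Coerce a cell to float, returning NaN for anything that cannot be parsed.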
try:
return float(value)
except:
return np.nan
metrics['fwci_awarded'] = metrics['fwci_awarded'].apply(value_checker)
metrics['pubs_awarded'] = metrics['pubs_awarded'].apply(value_checker)
# Collect datapoints per year
pubs_list = {}
fwci_list = {}
for year, df in metrics.groupby(['Year']):
for level, data in df.groupby('type_cat'):
pubs_list[f'{year}_{level}'] = list(data['pubs_awarded'])
fwci_list[f'{year}_{level}'] = list(data['fwci_awarded'])
# Generate separate dataframes
pubs = pd.DataFrame(dict([(k, | pd.Series(v) | pandas.Series |