import json
import numpy as np
import pandas as pd
import pickle
import sklearn
def process_input(request_data: str) -> pd.DataFrame:
"""
asserts that the request data is correct.
:param request_data: data gotten from the request made to the API
:return: the values from the dataframe
"""
parsed_body = json.loads(request_data)["inputs"]
    assert len(parsed_body) >= 1, "'inputs' must be a dictionary (or dictionaries) with features"
data = {'neighbourhood':[],
'Area':[],
'Number_of_rooms':[],
'Build_year':[],
'Floor':[],
'Nearest_educational_institution':[],
'Nearest_shop':[],
'Public_transport_stop':[],
'Heating_system':[],
'energy_class':[],
'Building_type':[],
'No_of_floors':[]
}
    for record in parsed_body:
        for feature in data:
            data[feature].append(record[feature])
    data_to_be_modelled = pd.DataFrame(data)
    return data_to_be_modelled
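# Minimal usage sketch for process_input. The request body below is a single
# made-up listing (illustrative values only), and _example_process_input is not
# part of the original module.
def _example_process_input() -> pd.DataFrame:
    sample_request = json.dumps({"inputs": [{
        "neighbourhood": "Antakalnis", "Area": 54.0, "Number_of_rooms": 2,
        "Build_year": 1998, "Floor": 3, "Nearest_educational_institution": 0.4,
        "Nearest_shop": 0.2, "Public_transport_stop": 0.1,
        "Heating_system": "Central heating", "energy_class": "B",
        "Building_type": "Brick", "No_of_floors": 5,
    }]})
    return process_input(sample_request)  # DataFrame with a single row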
from typing import Union
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
from pandas.core.arrays import ExtensionArray
from sklearn import preprocessing
import time
def convert_date_2_timestamp(date_str):
time_array = time.strptime(date_str, "%Y%m%d")
    return int(time.mktime(time_array))
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
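# Minimal usage sketch for the two helpers above. The sample date string and the
# toy array are illustrative values only (not project data), and _demo_helpers is
# not part of the original module.
def _demo_helpers():
    ts = convert_date_2_timestamp("20200706")           # seconds since the epoch (local time)
    scaled = normalization(np.array([1.0, 3.0, 5.0]))   # -> array([0. , 0.5, 1. ])
    return ts, scaled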
def load_pandas_df(
size="100k",
header=None,
local_cache_path=None,
title_col=None,
genres_col=None,
year_col=None,
):
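    # NOTE: the parameters above are currently ignored; this helper always reads
    # the local 'user_item_rating.csv' file.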
return pd.read_csv('user_item_rating.csv')
if __name__ == '__main__':
# user data
user_df = pd.read_table('user_list.txt', header=None)
user_df.columns = ['user_code']
user_df['id'] = user_df.index
user_map = dict(zip(user_df['user_code'].values, user_df['id'].values))
# item data
item_df = pd.read_table('item_list.txt', header=None)
item_df.columns = ['item_code']
item_df['id'] = item_df.index
item_map = dict(zip(item_df['item_code'].values, item_df['id'].values))
order_df = pd.read_csv('cust_prod_order_07-10.csv')
    order_df_train = order_df[order_df['DAY_WID'].isin(['20200706', '20200707', '20200708', '20200709', '20200710', '20200711', '20200712', '20200713'])].copy()
    order_df_train = order_df_train.sort_values(by='DAY_WID')
order_df_train['reviewerID'] = order_df_train['CUST_INTERNAL_CODE'].map(user_map)
order_df_train['asin'] = order_df_train['COMPANY_PRODUCT_CODE'].map(item_map)
order_df_train = order_df_train.dropna(axis=0, how='any')
order_df_train['reviewerID'] = order_df_train['reviewerID'].astype(int)
order_df_train['asin'] = order_df_train['asin'].astype(int)
user_item_train = pd.DataFrame(columns=['reviewerID', 'asin', 'DAY_WID'])
user_item_train[['reviewerID', 'asin', 'DAY_WID']] = order_df_train[['reviewerID', 'asin', 'DAY_WID']]
    # build the rating data
min_max_scaler = preprocessing.MinMaxScaler()
    user_item_rating = pd.DataFrame(columns=['userID', 'itemID', 'rating', 'timestamp'])
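    # The remainder of the original script (filling user_item_rating with scaled
    # ratings) is not included in this excerpt. As a minimal, self-contained
    # illustration of the scaling step: MinMaxScaler maps a column linearly onto
    # [0, 1]. The toy array below is made up for demonstration, and a fresh scaler
    # instance is used so the one created above is left untouched.
    _toy_counts = np.array([[1.0], [4.0], [7.0]])
    _toy_scaled = preprocessing.MinMaxScaler().fit_transform(_toy_counts)  # [[0.], [0.5], [1.]]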
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
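# The classes below are a legacy nose/unittest-style test suite for pandas Index
# objects: Base collects checks shared by every Index subclass, and each
# TestXxx class exercises one concrete index type (Index, Float64Index,
# Int64Index, DatetimeIndex, PeriodIndex, TimedeltaIndex, MultiIndex).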
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
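        # view the Timestamp as its raw int64 nanosecond value, then build an
        # index of 100 consecutive nanosecond ticks starting 50 ns after it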
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
# levels should be (at least) shallow copied
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
# labels doesn't matter which way copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
# names doesn't matter which way copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = | MultiIndex.from_tuples(obj.values) | pandas.core.index.MultiIndex.from_tuples |
import csv
import os
# floor areas for the individual orders, used as the basis for the allocation
RR_soubor='Rentroll_podklad.csv'
#-----------------------------------------------------------------------------'
def uprava_cisla(hodnota):
try:
return int(hodnota)
except ValueError:
return float(hodnota.replace(',','.'))
# builds a list of orders and loads their floor areas and vacant flats into dictionaries
Zakazky_all=[]
SQM_total=0
Vacant_total=0
with open(RR_soubor, 'r', encoding='utf-8') as zakazky_source:
zakazky_data = csv.reader(zakazky_source, delimiter=';', quotechar='|')
next(zakazky_data)
for row in zakazky_data:
SQM_upraveno= uprava_cisla(row[1])
Vacant_upraveno=uprava_cisla(row[2])
zakazka={'Zak':row[0], 'SQM':SQM_upraveno, 'Vacant':Vacant_upraveno}
Zakazky_all.append(zakazka)
SQM_total+=SQM_upraveno
Vacant_total+=Vacant_upraveno
# adds each order's share of total sqm and of vacant flats
for zakazka in Zakazky_all:
zakazka['Sqm_podil']=zakazka['SQM']/SQM_total
zakazka['Vacant_podil']=zakazka['Vacant']/Vacant_total
# prints control info about the orders
print('Total number of orders :', len(Zakazky_all))
print('Total sqm :', round(SQM_total, 2))
print('Total number of vacant flats :', Vacant_total)
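# --- Hedged illustration (not part of the original pipeline) -------------------
# The shares computed above are what the allocation further below relies on: an
# amount with no order assigned is split across all orders in proportion to the
# chosen key. A minimal sketch of that idea; `amount` is a hypothetical figure.
def _allocate_by_share_sketch(amount, key='Sqm_podil'):
    """Return {order number: allocated amount} for the chosen share column."""
    return {z['Zak']: round(amount * z[key], 2) for z in Zakazky_all}
# e.g. _allocate_by_share_sketch(1000.0, key='Vacant_podil') splits 1000 by vacancy share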
#-----------------------------------------------------------------------------'
# loads and reshapes the SAP export into the format required for import into BPC
# reads the SAP export file into a dataframe
import pandas
import numpy
zahlavi = ['Month','Year','Zak','Account','Amount','NS','PC','H5','H6',"Acctype"]
types = {'Month': numpy.str, 'Year': numpy.str,'Zak':numpy.str, 'Account': numpy.str, 'Amount': numpy.str, 'NS': numpy.str, 'PC': numpy.str, 'H5': numpy.str, 'H6': numpy.str, 'Acctype': numpy.str}
export = pandas.read_csv('SAP_export_zakazky.csv', sep=';', skiprows=1, names=zahlavi,
                         dtype=types, engine='python', skipfooter=1)
# converts the amount to use a decimal point
export['Amount'] = export['Amount'].str.replace(',','.')
export['Amount'] = export['Amount'].str.replace(' ','')
export['Amount'] = export['Amount'].astype(numpy.float64)
# adjusts the time column
Month=export['Month']
# zero-pads single-digit months so the period always has the form MM
if len(Month[0]) == 1:
    export['Month'] = '0' + export['Month']
export['Year'] = export['Year'].str.slice(2)  # keep only the last two digits of the year
export['Month'] = export.Month.astype(str).str.cat(export.Year.astype(str), sep='.')
export = export.drop('Year', 1)
export = export.rename(columns={'Month': 'Time'})
# replaces the EPM error message in columns H5 and H6
export['H5'] = export['H5'].str.replace('The member requested does not exist in the specified hierarchy.','x')
export['H6'] = export['H6'].str.replace('The member requested does not exist in the specified hierarchy.','x')
# drops the NS column
export = export.drop('NS', 1)
# adds the EPM dimensions
export['IntCo'] = 'Non_Interco'
export['Currency'] = 'LC'
export['Measures'] = 'Periodic'
export['Entity'] = '0'+ export['PC'].str.get(0) + export['PC'].str.get(1)
export['Datasrc'] = numpy.where(export['Account'].str.get(0) == 'P', 'IFRS', numpy.where(export['Account'].str.get(0) == 'R', 'IFRS', 'REP'))
blank_zak = ['800000000','800000001','800000002','800000003','800000004','810000000','820000000','830000000','840000000']
export['Zak'] = numpy.where(export['Zak'].isin(blank_zak), 'nan', export['Zak'])
# duplicates based on the combination of order number and account number are collapsed; PC is ignored, as it is irrelevant for this kind of report
export['Amount'] = export.groupby(['Zak', 'Account'])['Amount'].transform('sum')
export.drop_duplicates(subset=['Zak', 'Account'], keep='first', inplace=True)
#-----------------------------------------------------------------------------'
# allocates the adjusted table according to the allocation method, based on each key's share of the order totals
# helper column for the allocation
conditions = [
(export['Zak'] == 'nan') & (export['H6'].str.contains('JC09', na=False, regex=False) == True),
(export['H5'] == 'VacC') & (export['Entity'] == '065'),
(export['Zak'] == 'nan')]
klic = ['Vacant', 'VacC', 'Area']
export['Alokace'] = numpy.select(conditions, klic, default='OK')
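# --- Hedged illustration (toy data, separate from the real export) -------------
# numpy.select evaluates the condition list in order: the first condition that
# matches a row wins, and rows matching none of them get the default. A tiny
# self-contained demonstration of that behaviour:
def _select_order_demo():
    toy = pandas.DataFrame({'flag': ['a', 'b', 'c']})
    picked = numpy.select([toy['flag'] == 'a', toy['flag'].isin(['a', 'b'])],
                          ['first', 'second'], default='OK')
    return list(picked)  # ['first', 'second', 'OK'] - 'a' hits the first rule only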
# creates the allocation table and splits it into four parts according to the allocation method
# the VacC table, which is not allocated any further, is written straight to the VacC file
Table_Alokace= | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=['a', 'b'], )
tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
| assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
'''
This program will simulate leveling a DnD character, showing their ending HP and stats.
'''
import argparse
import csv
import json
import re
import time
from openpyxl import load_workbook
from pandas import DataFrame
from src import classes, util
def import_race_data(file_path):
'''
This method imports data from the input CSV and returns a dictionary containing
all of the data formatted by race and subrace
Arguments:
:param file_path: (str) The filepath to the data
Returns:
dict: The dictionary of all of the data
'''
retval = {}
# Open csv file and read in all data
with open(file_path) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
race = row['Race']
subrace = row['Subrace']
if(subrace):
if(race in retval):
if('Subraces' not in retval[race]):
retval[race]['Subraces'] = {}
retval[race]['Subraces'][subrace] = row
else:
retval[race] = {'Subraces':{}}
retval[race]['Subraces'][subrace] = row
else:
retval[race] = row
return retval
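# --- Hedged usage sketch (not part of the original program) --------------------
# Shows the nesting that import_race_data() produces. The 'HP die' column and the
# sample rows below are illustrative assumptions, not data shipped with this tool.
def _demo_import_race_data():
    import tempfile
    rows = [{'Race': 'Elf', 'Subrace': 'High Elf', 'HP die': 'd8'},
            {'Race': 'Human', 'Subrace': '', 'HP die': 'd8'}]
    with tempfile.NamedTemporaryFile('w', suffix='.csv', newline='', delete=False) as handle:
        writer = csv.DictWriter(handle, fieldnames=['Race', 'Subrace', 'HP die'])
        writer.writeheader()
        writer.writerows(rows)
        sample_path = handle.name
    data = import_race_data(sample_path)
    os.remove(sample_path)
    # Subrace rows are nested:            data['Elf']['Subraces']['High Elf']['HP die'] == 'd8'
    # Rows without a subrace stay flat:   data['Human']['HP die'] == 'd8'
    return data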
def update_mode(args):
'''
This method is the main method for running this program in Update mode.
Update mode takes in a specifically formatted XLSX file and outputs a JSON
file containing all of the data for races and subraces needed by the
program in run mode
Arguments:
:param args: (dict) A dictionary containing the needed arguments
Returns:
bool: Whether or not the update completed successfully or not
'''
# Let's first open the workbook
try:
workbook = load_workbook(args['xlsx_file'])
except Exception:
return False
# Now turn the Race sheet into a dataframe
df = DataFrame()
for name in workbook.sheetnames:
if('Race' in name):
df = | DataFrame(workbook[name].values) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import orca
from urbansim_templates import utils
def test_parse_version():
assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0)
assert utils.parse_version('0.115.3') == (0, 115, 3, None)
assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7)
assert utils.parse_version('5.4') == (5, 4, 0, None)
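# --- Hedged illustration only ---------------------------------------------------
# A minimal version parser consistent with the expectations asserted above; it is
# not the actual urbansim_templates implementation, just a sketch of the idea.
def _parse_version_sketch(v):
    parts = v.split('.')
    dev = None
    if parts[-1].startswith('dev'):
        dev = int(parts[-1][3:])
        parts = parts[:-1]
    nums = [int(p) for p in parts] + [0, 0]
    return (nums[0], nums[1], nums[2], dev)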
def test_version_greater_or_equal():
assert utils.version_greater_or_equal('2.0', '0.1.1') == True
assert utils.version_greater_or_equal('0.1.1', '2.0') == False
assert utils.version_greater_or_equal('2.1', '2.0.1') == True
assert utils.version_greater_or_equal('2.0.1', '2.1') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.2') == True
assert utils.version_greater_or_equal('1.1.2', '1.1.3') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.3') == True
assert utils.version_greater_or_equal('1.1.3.dev1', '1.1.3.dev0') == True
assert utils.version_greater_or_equal('1.1.3.dev0', '1.1.3') == False
###############################
## get_df
@pytest.fixture
def df():
d = {'id': [1,2,3], 'val1': [4,5,6], 'val2': [7,8,9]}
return | pd.DataFrame(d) | pandas.DataFrame |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab determine the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
                                                     html.P(
                                                         "2) If they meet at roughly 90°, they are unlikely to be correlated. "),
                                                     html.P(
                                                         "3) When they diverge and form a large angle (close to 180°), they are negatively correlated."),
                                                     html.P(
                                                         "The Score Plot involves the projection of the data onto the PCs in two dimensions. "
                                                         "The plot contains the original data, but in the rotated (PC) coordinate system."),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
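    # Decode the base64-encoded upload and parse it as CSV, Excel, or whitespace-delimited text.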
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
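    # Cache the parsed upload as JSON in the 'csv-data' store so the other callbacks can reuse it.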
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
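    # Scree plot: cumulative proportion of variance explained by the principal components.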
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
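        # Loadings: eigenvectors scaled by the square root of their eigenvalues (feature-PC correlations).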
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
            'layout': go.Layout(title='<b>Scree Plot: Cumulative Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
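    # Report how many principal components are needed to reach ~70% cumulative explained variance.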
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_eigen(outlier, matrix_type, data):
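    # Eigenvalue plot: the eigenvalue associated with each principal component.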
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = | pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues']) | pandas.DataFrame |
import pickle
from pathlib import Path
from typing import Optional, List, Iterable, Dict, Any
import click
import pandas as pd
import torch
from tqdm import tqdm
from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, \
openai_gpt, ctrl, pplm, gpt2mcm
from utils.constants import PERSPECTIVE_API_ATTRIBUTES_LOWER
from utils.perspective_api import PerspectiveWorker, unpack_scores
from utils.utils import load_jsonl, batchify
import sys
import os
ALLOWED_MODELS = ['gpt3', 'gpt2', 'gpt2-affect', 'gpt2-ctrl', 'gpt2-greedy', 'gpt2-naughty-list',
'pplm', 'ctrl', 'openai-gpt', 'xlnet',
'gpt2mcm']
def make_generations_col(generations, responses):
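    # Yield one record per generation, merged with its unpacked Perspective API scores (None if the request failed).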
for generation, response in zip(generations, responses):
if response['response']:
response = unpack_scores(response['response'])[0]
else:
response = {x: None for x in PERSPECTIVE_API_ATTRIBUTES_LOWER}
yield {'text': generation, **response}
def collate(dataset: Optional[pd.DataFrame], generations: List[str], responses: Iterable[Dict[str, Any]],
output_file: str):
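    # Attach Perspective API scores to each generation and write the collated records to a JSONL file.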
generations_col_iter = make_generations_col(generations, responses)
if dataset is None:
generations_col = list(tqdm(generations_col_iter, total=len(generations), desc='Collating files'))
dataset = pd.DataFrame(generations_col)
else:
assert len(generations) % len(dataset) == 0
n = len(generations) // len(dataset)
print(f"Detected samples per prompt:", n)
generations_col = list(tqdm(batchify(generations_col_iter, n), total=len(dataset), desc='Collating files'))
dataset['generations'] = generations_col
dataset.to_json(output_file, orient='records', lines=True)
@click.command()
@click.argument('output-dir')
@click.option('--dataset-file', required=False, type=str,
help='JSONL file containing prompts data. Each row must contain a prompt at `row["prompt"]["text"]`.')
@click.option('--use-eos/--use-dataset', default=False, help='Whether to use EOS or a dataset file for generation.')
@click.option('--model', required=True, help='Equivalent to `model_name_or_path` in transformers.')
@click.option('--model-type', required=True,
type=click.Choice(ALLOWED_MODELS))
@click.option('--perspective-rate-limit', default=25)
@click.option('--n', default=25, help='Number of samples to generate for each prompt. When used with --eos')
@click.option('--max-tokens', default=20, help='Number of tokens (usually BPE) to generate for each prompt.')
@click.option('--batch-size', default=32)
@click.option('--start-index', default=0)
@click.option('--end-index', default=-1)
@click.option('--mcm-t', default=0.0)
@click.option('--mcm-keepmin', default=30)
@click.option('--top-k', default=50)
@click.option('--resume/--no-resume', default=False)
@click.option('--usemcm/--no-usemcm', default=True, help='Use mcm or not. When used with --model-type gpt2mcm')
def main(output_dir: str, dataset_file: Optional[str], use_eos: bool, model: str, model_type: str, n: int,
max_tokens: int, batch_size: int, perspective_rate_limit: int, start_index: int, end_index: int,
mcm_t: float, mcm_keepmin: int, top_k: int,
resume: bool, usemcm: bool):
# Load prompts
if dataset_file:
assert not use_eos
# Load prompts from dataset file
assert dataset_file.endswith('.jsonl')
dataset = pd.read_json(dataset_file, lines=True)
dataset = dataset[start_index:end_index]
prompts = pd.json_normalize(dataset['prompt'])['text']
elif use_eos:
assert not dataset_file
dataset = None
# Create EOS prompts
if model_type in ['gpt2', 'gpt2-affect', 'gpt2-naughty-list', 'pplm', 'gpt2mcm']:
prompts = | pd.Series('<|endoftext|>') | pandas.Series |
#Helper
##############
from Helper import split_sequence
from Helper import layer_maker
##Startup
##############
# Library Imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
plt.style.use("ggplot")
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
# Loading the Data
df = pd.read_csv("Algo-USD.csv")
# Data Preprocessing
# Setting the datetime index as the date
# Only selecting the 'Close' column
# Only the last 1000 closing prices.
df = df.set_index("Date")[['Close']].tail(1000)
df = df.set_index( | pd.to_datetime(df.index) | pandas.to_datetime |
import pandas as pd
from pandas.plotting import lag_plot
import numpy as np
import matplotlib as mlp
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
import pmdarima as pm
from ipywidgets import *
from IPython.display import display, HTML
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
dataurl = 'https://raw.githubusercontent.com/ming-zhao/Business-Analytics/master/data/time_series/'
df_house = pd.read_csv(dataurl+'house_sales.csv', parse_dates=['date'], header=0, index_col='date')
df_house['year'] = [d.year for d in df_house.index]
df_house['month'] = [d.strftime('%b') for d in df_house.index]
df_drink = pd.read_csv(dataurl+'drink_sales.csv', parse_dates=['date'], header=0)
df_drink['date'] = [pd.to_datetime(''.join(df_drink.date.str.split('-')[i][-1::-1]))
+ pd.offsets.QuarterEnd(0) for i in df_drink.index]
df_drink = df_drink.set_index('date')
# df_drink[['q','year']]=df_drink['quarter'].str.split('-',expand=True)
df_drink['year'] = [d.year for d in df_drink.index]
df_drink['quarter'] = ['Q'+str(d.month//3) for d in df_drink.index]
def sinusoidal(x):
return np.sin(2 * np.pi * x)
def create_data(func, sample_size, std, domain=[0, 1]):
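    # Sample x uniformly over the domain (then shuffle) and add Gaussian noise to func(x).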
x = np.linspace(*domain, sample_size)
np.random.shuffle(x)
t = func(x) + np.random.normal(scale=std, size=x.shape)
return x, t
def training_data(show):
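    # Scatter the noisy training points, optionally overlaying the true sine curve.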
np.random.seed(11223)
x_train, t_train = create_data(sinusoidal, 13, 0.25)
x_test = np.linspace(0, 1, 100)
t_test = sinusoidal(x_test)
plt.scatter(x_train, t_train, facecolor="none", edgecolor="b", s=50, label="training data")
if show:
plt.plot(x_test, t_test, c="g", label="$\sin(2\pi x)$")
plt.ylim(-1.5, 1.5)
plt.legend(loc=1)
plt.show()
def poly_fit(show):
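    # Fit polynomials of degree 1, 3 and 9 to the noisy sine data and plot each fit.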
np.random.seed(11223)
x_train, t_train = create_data(sinusoidal, 13, 0.25)
x_test = np.linspace(0, 1, 100)
t_test = sinusoidal(x_test)
fig = plt.figure(figsize=(15, 4))
for i, degree in enumerate([1, 3, 9]):
plt.subplot(1, 3, i+1)
poly = PolynomialFeatures(degree=degree, include_bias=True)
model = LinearRegression()
model.fit(poly.fit_transform(x_train[:,None]),t_train[:,None])
t = model.predict(poly.fit_transform(x_test[:,None]))
plt.scatter(x_train, t_train, facecolor="none", edgecolor="b", s=50, label="training data")
if show:
plt.plot(x_test, t_test, c="g", label="$\sin(2\pi x)$")
plt.plot(x_test, t, c="r", label="fitting")
plt.ylim(-1.5, 1.5)
plt.legend(loc=1)
plt.title("polynomial fitting with dregree {}".format(degree))
plt.show()
def poly_fit_holdout(show, train, test):
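    # Same as poly_fit, but the last three points are held out as a test set.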
np.random.seed(11223)
x_train, t_train = create_data(sinusoidal, 13, 0.25)
x_test = np.linspace(0, 1, 100)
t_test = sinusoidal(x_test)
fig = plt.figure(figsize=(15, 4))
for i, degree in enumerate([1, 3, 9]):
plt.subplot(1, 3, i+1)
poly = PolynomialFeatures(degree=degree, include_bias=True)
model = LinearRegression()
model.fit(poly.fit_transform(x_train[:-3,None]),t_train[:-3,None])
t = model.predict(poly.fit_transform(x_test[:,None]))
if train:
plt.scatter(x_train[:-3], t_train[:-3], facecolor="none", edgecolor="b", s=50, label="training data")
if test:
plt.scatter(x_train[-3:], t_train[-3:], facecolor="none", edgecolor="orange", s=50, label="testing data")
if show:
plt.plot(x_test, t_test, c="g", label="$\sin(2\pi x)$")
plt.plot(x_test, t, c="r", label="fitting")
plt.ylim(-1.5, 1.5)
plt.legend(loc=1)
plt.title("polynomial fitting with dregree {}".format(degree))
plt.show()
# noise = pd.Series(np.random.randn(200))
# def randomwalk(drift):
# return pd.Series(np.cumsum(np.random.uniform(-1,1,(200,1)) + drift*np.ones((200,1))))
def random_walk(drift):
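    # Plot the cumulative sum of uniform(-1, 1) steps plus a constant drift term.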
np.random.seed(123)
# randomwalk(drift).plot(title='Random Walk')
pd.Series(np.cumsum(np.random.uniform(-1,1,(200,1)) + drift*np.ones((200,1)))).plot(title='Random Walk')
plt.show()
def plot_time_series(df, col_name, freq='Month', title=''):
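    # Line plot of a single column, with the x-axis extended to the start of the year after the last observation.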
ax = df.plot(y=col_name, figsize=(15,6), x_compat=True)
ax.set_xlim(pd.to_datetime(df.index[0]),
pd.to_datetime(str(pd.Timestamp(df.index[-1]).year+1) + '-01-01'))
if freq=='Month':
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=12))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%Y'))
plt.title(title)
plt.show()
def seasonal_plot(df, col_names, title=''):
np.random.seed(100)
years = | pd.Series([x.year for x in df.index]) | pandas.Series |
#%%
import pandas as pd
from src.models.data_modules import *
from biasbalancer.utils import label_case
# %%
# Train sizes
def get_size(dm, which):
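    # Number of observations in the requested split ('train', 'val' or 'test').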
attrname = which+'_idx'
idx = getattr(dm, attrname)
return len(idx)
def get_dataset_info(dm):
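    # Collect split sizes (per fold when the data module uses K-fold CV) and append hyperparameter info.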
index = [0]
if hasattr(dm, 'fold'):
n_folds = dm.kf.get_n_splits()
df = []
for i in range(n_folds):
dm.make_KFold_split(fold = i)
tmp = pd.DataFrame({
'dataset': dm.dataset_name,
'fold': str(i),
'train_size': get_size(dm, 'train'),
'val_size': get_size(dm, 'val'),
'test_size': get_size(dm, 'test'),
}, index = index)
df.append(tmp)
res = pd.concat(df).reset_index(drop = True)
else:
res = pd.DataFrame({
'dataset': dm.dataset_name,
'fold': ' ',
'train_size': get_size(dm, 'train'),
'val_size': get_size(dm, 'val'),
'test_size': get_size(dm, 'test'),
}, index = index)
hyperparams = get_hyperparameter_info(dm)
res = pd.concat([res, hyperparams], axis = 1)
return res
def get_hyperparameter_info(dm):
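    # Load the tuned hyperparameters saved alongside each dataset's prediction files.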
hyper_paths = {
'German Credit': 'data/predictions/german_credit_nn_pred_hyperparams',
'Catalan Recidivism': 'data/predictions/catalan-juvenile-recidivism/catalan_recid_nn_pred_hyperparams',
'Taiwanese Credit': 'data/predictions/taiwanese_nn_pred_hyperparams'}
hyperpath = hyper_paths[dm.dataset_name] + '.csv'
hyperdf = | pd.read_csv(hyperpath) | pandas.read_csv |
"""A set of unit tests for the helper functions."""
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
from precon.helpers import axis_vals_as_frame
from test.conftest import create_dataframe
class TestAxisValsAsFrame:
"""Tests for the axis_vals_as_frame function.
Uses one input dataset to test the following test cases, when:
* axis = 1
* axis = 0
* axis = 1 and converter = lambda x: x.month
* axis = 0, levels = 1 and conveter = lambda x: x.str.upper()
"""
@pytest.fixture
def input_data(self):
"""Return the input data for axis_vals_as_frame."""
df = create_dataframe(
[ # A and B cols are set to the index
('A', 'B', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(0, 'foo', None, None, None, None),
(1, 'bar', None, None, None, None),
(2, 'baz', None, None, None, None),
(3, 'qux', None, None, None, None),
],
)
df = df.set_index(['A', 'B'])
df.columns = pd.to_datetime(df.columns)
return df
@pytest.fixture
def expout_column_values(self):
"""Return the exp output for axis = 1 case."""
df = create_dataframe(
[ # A and B cols are set to the index
('A', 'B', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(0, 'foo', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(1, 'bar', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(2, 'baz', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(3, 'qux', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
],
)
df = df.set_index(['A', 'B'])
df.columns = pd.to_datetime(df.columns)
# Convert all the df values to datetime
return df.apply(pd.to_datetime)
def test_that_col_values_broadcast_across_all_rows_in_df(
self,
input_data,
expout_column_values,
):
"""Unit test for axis = 1 case."""
# GIVEN a DataFrame and axis argument = 1 for columns
# WHEN axis_vals_as_frame function returns
# THEN returns a DataFrame with the column values broadcast across each row.
true_output = axis_vals_as_frame(input_data, axis=1)
assert_frame_equal(true_output, expout_column_values)
@pytest.fixture
def expout_index_values(self):
"""Return the exp output for axis = 0 case."""
df = create_dataframe(
[ # A and B cols are set to the index
('A', 'B', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(0, 'foo', (0, 'foo'), (0, 'foo'), (0, 'foo'), (0, 'foo')),
(1, 'bar', (1, 'bar'), (1, 'bar'), (1, 'bar'), (1, 'bar')),
(2, 'baz', (2, 'baz'), (2, 'baz'), (2, 'baz'), (2, 'baz')),
(3, 'qux', (3, 'qux'), (3, 'qux'), (3, 'qux'), (3, 'qux')),
],
)
df = df.set_index(['A', 'B'])
df.columns = pd.to_datetime(df.columns)
return df
def test_that_index_values_broadcast_across_all_columns_in_df(
self,
input_data,
expout_index_values,
):
"""Unit test for axis = 0 case."""
# GIVEN a DataFrame and axis argument = 0 for index
# WHEN axis_vals_as_frame function returns
# THEN returns a DataFrame with the index values broadcast across each col.
true_output = axis_vals_as_frame(input_data, axis=0)
assert_frame_equal(true_output, expout_index_values)
@pytest.fixture
def expout_months_from_cols(self):
"""Return exp output for axis=1 and converter=lambda x: x.month case."""
df = create_dataframe(
[ # A and B cols are set to the index
('A', 'B', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(0, 'foo', 1, 2, 3, 4),
(1, 'bar', 1, 2, 3, 4),
(2, 'baz', 1, 2, 3, 4),
(3, 'qux', 1, 2, 3, 4),
],
)
df = df.set_index(['A', 'B'])
df.columns = pd.to_datetime(df.columns)
return df
def test_that_broadcasts_col_vals_across_rows_with_converter(
self,
input_data,
expout_months_from_cols,
):
"""Unit test for axis=1 and converter=lambda x: x.month case."""
# GIVEN a DataFrame, axis = 1 argument and a lambda function to get the months attr
# WHEN axis_vals_as_frame function returns
# THEN returns a DataFrame with the months broadcast across each row.
true_output = axis_vals_as_frame(
input_data,
axis=1,
converter=lambda x: x.month,
)
assert_frame_equal(true_output, expout_months_from_cols)
@pytest.fixture
def expout_index_level_1_all_caps(self):
"""Return exp output for axis=0, levels=1 and converter = lambda x: x.upper() case."""
df = create_dataframe(
[ # A and B cols are set to the index
('A', 'B', '2017-01-01', '2017-02-01', '2017-03-01', '2017-04-01'),
(0, 'foo', 'FOO', 'FOO', 'FOO', 'FOO'),
(1, 'bar', 'BAR', 'BAR', 'BAR', 'BAR'),
(2, 'baz', 'BAZ', 'BAZ', 'BAZ', 'BAZ'),
(3, 'qux', 'QUX', 'QUX', 'QUX', 'QUX'),
],
)
df = df.set_index(['A', 'B'])
df.columns = pd.to_datetime(df.columns)
return df
def test_that_broadcasts_index_level_1_vals_to_columns_with_converter(
self,
input_data,
expout_index_level_1_all_caps,
):
"""Unit test for axis=0, levels=1 and converter = lambda x: x.upper() case."""
# GIVEN a DataFrame, axis=0, levels=1 and converter=lambda x: x.upper() as args
# WHEN axis_vals_as_frame function returns
# THEN returns a DataFrame with level 1 in all caps broadcast across each col.
true_output = axis_vals_as_frame(
input_data,
axis=0,
levels=1,
converter=lambda x: x.str.upper(),
)
| assert_frame_equal(true_output, expout_index_level_1_all_caps) | pandas._testing.assert_frame_equal |
import pandas as pd
import sqlite3
from datetime import datetime, timedelta, date
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
import selectStock_datetime
def scaler(result_df:pd.DataFrame) -> pd.DataFrame:
"""
    Normalise every column except `date` to the range [0, 1].
    result_df : DataFrame to normalise
"""
date_c = list(result_df['date'])
x = result_df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
result_df = pd.DataFrame(x_scaled, columns=result_df.columns)
result_df['date'] = date_c
return result_df
def service(code:str, start_day:int, end_day:int, period:int, drop_holi = 0, stock_moving_avg = 1, day_shift = 0) -> pd.DataFrame:
    '''
    Query the DB for the given period using the options below, reshape the data, and return it.
    -- Options --
    code : stock code (name) to query
    start_day : query start date
    end_day : query end date
    period : window (in days) for the moving average of news sentiment and stock price
    drop_holi : whether to use weekend/holiday news. 0 (default) - fill with the next business day's price / 1 - drop weekend and holiday data
    stock_moving_avg : whether to apply a moving average to the stock price. 1 (default) - use the moving average / 0 - do not
    day_shift : lag (in days) between news and price. 0 (default) | +x - analyse today's news against the next day's price | -x - analyse today's news against the previous day's price
    -- Returns --
    result_df : DataFrame of date / sentiment / price movement
    all_keyword : keywords over the whole queried period
    pos_keyword : keywords on days the price rose
    neg_keyword : keywords on days the price fell
    df_length : length of the DataFrame for the queried period
    '''
    # Set the DB query start date, extended backwards to cover the moving-average window
inq_day = (datetime.strptime(str(start_day), "%Y%m%d").date() - timedelta(days = period - 1)).strftime('%Y%m%d')
end_day = str(end_day)
    # The DB path must be adjusted to the local environment
conn = sqlite3.connect("DB/2jo.db")
    # Bind a cursor
c = conn.cursor()
    # Query the news data
# query = c.execute(f"select a.id, a.date, a.code, b.senti, b.senti_proba from news_db b join news_id a on b.id = a.id where a.date BETWEEN {inq_day} and {end_day};")
query = c.execute(f"select a.id, a.date, a.code, b.keyword, b.senti, b.senti_proba from news_db b join news_id a on b.id = a.id where a.code = \'{code}\' and (a.date BETWEEN {inq_day} and {end_day});")
    # Fetch the column names
cols = [column[0] for column in query.description]
    # Build a DataFrame from the query results
news_result_df = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)
    # Prepare the keyword DataFrame (keywords aggregated per date)
keyword_result_df = news_result_df.groupby('date')['keyword'].apply(lambda x: x.sum())
    # Close the connection - left commented out for now
# conn.close()
    # Query the stock price data
query = c.execute(f"select s_date, s_code, f_rate from stock_db where s_code = \'{code}\' and (s_date BETWEEN {inq_day} and {end_day});")
    # Fetch the column names
cols = [column[0] for column in query.description]
    # Build a DataFrame from the query results
stock_result_df = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)
stock_result_df.rename(columns={'s_date': 'date', 's_code': 'code', 'f_rate': 'UpDown'}, inplace=True)
    # Length of the stock DataFrame (returned to the caller)
df_length = len(stock_result_df)
    # Whether to drop weekends and holidays depends on the option; the default keeps them
if drop_holi:
        # Drop days with no stock price (weekends, public holidays, etc.)
merge_outer_df = | pd.merge(news_result_df,stock_result_df, how='outer',on='date') | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# ## HUDBDC overtime analysis
# In[1]:
import os
import sys
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from glob import glob
from datetime import datetime as dt
#Suppress warning
pd.set_option('mode.chained_assignment', None)
# Read all xls files from desired place
files = glob(os.path.dirname(os.path.abspath(__file__)) + '/Excel/OT*.xls')
# Read first excel file into dataframe
df = pd.read_excel(files[0], header=7, keep_default_na=False,
usecols=range(2, 10), skip_blank_lines=True)
# Read further excel files and concatenate them together
for i in range(1, len(files)):
df = pd.concat([df, pd.read_excel(files[i], header=7, keep_default_na=False,
usecols=range(2, 10), skip_blank_lines=True)])
# Change column names
df.rename(columns={'List of people': 'Employees', 'Date of overtime': 'Date',
'Nr. of hours': 'Hours'}, inplace=True)
# Remove unnecessary both rows and columns
mask1 = df['Employees'] != ''
mask2 = df['Start'] != ''
df = df[mask1 & mask2]
# Set Date column as datetime object and Hours as float64
df['Date'] = pd.to_datetime(df['Date'])
df['Hours'] = df['Hours'].astype('float64')
# Summarize hours by employees
df_sum_ot = df.groupby(['Employees']).sum()['Hours'].to_frame().reset_index()
# Create figure
fig = px.bar(df_sum_ot.sort_values('Hours', ascending=False),
x='Employees', y='Hours', text='Hours',
title='Sum of requested overtime between ' + str(df['Date'].min()).split(' ')[0] + ' and ' +
str(df['Date'].max()).split(' ')[0])
fig.update_traces(texttemplate='%{text:.2s}', textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
#fig.show()
fig.write_html(sys.argv[0][:sys.argv[0].rfind('/')] + '/summary_overtime.html')
# In[2]:
#Change date value to month name
def month_name_column(row):
return dt.strftime(row, '%B')
#Dictionary to map month names to month numbers
month_map = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5,
'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10,
'November': 11, 'December': 12}
#Create new column for month number
df['Month'] = df['Date'].apply(month_name_column)
#Group by Months
df_sum_hours = df.groupby(['Month']).agg({'Hours': 'sum'})
#Reset the index so 'Month' becomes a regular column
df_sum_hours.reset_index(inplace=True)
#Create new column for month numbers
df_sum_hours['Month number'] = df_sum_hours['Month'].map(month_map)
#Sort column
df_sum_hours.sort_values('Month number', inplace=True)
#Create chart
fig = go.Figure(go.Bar(
y=df_sum_hours['Month'],
x=df_sum_hours['Hours'],
orientation='h',
marker=dict(
color='rgba(89, 200, 23, 0.6)',
line=dict(color='rgba(89, 200, 23, 1.0)', width=3))
))
fig.update_layout(
xaxis=dict(
showgrid=False,
showline=False,
showticklabels=True,
zeroline=False,
domain=[0.15, 1]
),
yaxis=dict(
showgrid=False,
showline=False,
showticklabels=True,
zeroline=False,
),
barmode='stack',
paper_bgcolor='rgb(248, 248, 255)',
plot_bgcolor='rgb(248, 248, 255)',
margin=dict(l=120, r=10, t=140, b=80),
showlegend=False,
title='Number of overtime by months'
)
#fig.show()
fig.write_html(sys.argv[0][:sys.argv[0].rfind('/')] + '/summary_by_month.html')
# In[3]:
#Group by Customer and calculate sum of hours
df_sum_customers = df.groupby(['Customer']).agg({'Hours' : 'sum'})
#Create chart
fig = px.pie(df_sum_customers, values='Hours', names=df_sum_customers.index, title='Spent hours by customers')
fig.show()
# In[37]:
df_abn = df[df['Customer'] == 'ABN-AMRO']
df_abn_final = df_abn.groupby(['Month', 'Employees']).agg({'Hours' : 'sum'})
df_abn_final = df_abn_final.reset_index()
df_abn_total_hours = df_abn.groupby('Month').agg({'Hours' : 'sum'})
df_abn_total_hours.reset_index(inplace=True)
df_result = | pd.merge(df_abn_final, df_abn_total_hours, on='Month', suffixes=('', ' Total')) | pandas.merge |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
        with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
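            # Likely body (assumption -- the source is truncated here): constructing a Series
            # from an index alone warns about the default dtype and must keep the index order.
            result = pd.Series(index=["b", "a", "c"])
        assert result.index.tolist() == ["b", "a", "c"]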
import torch
from torch.nn.utils import clip_grad_norm_
torch.multiprocessing.set_sharing_strategy('file_system')
import pandas as pd
import numpy as np
from tqdm import tqdm
import heapq
from pathlib import Path
class Learning():
def __init__(self,
optimizer,
loss_fn,
device,
n_epoches,
scheduler,
freeze_model,
grad_clip,
grad_accum,
early_stopping,
validation_frequency,
calculation_name,
best_checkpoint_folder,
checkpoints_history_folder,
checkpoints_topk,
logger
):
self.logger = logger
self.optimizer = optimizer
self.loss_fn = loss_fn
self.device = device
self.n_epoches = n_epoches
self.scheduler = scheduler
self.freeze_model = freeze_model
self.grad_clip = grad_clip
self.grad_accum = grad_accum
self.early_stopping = early_stopping
self.validation_frequency = validation_frequency
self.calculation_name = calculation_name
self.best_checkpoint_path = Path(
best_checkpoint_folder,
'{}.pth'.format(self.calculation_name)
)
self.checkpoints_history_folder = Path(checkpoints_history_folder)
self.checkpoints_topk = checkpoints_topk
self.score_heap = []
self.summary_file = Path(self.checkpoints_history_folder, 'summary.csv')
if self.summary_file.is_file():
self.best_score = pd.read_csv(self.summary_file).best_metric.max()
logger.info('Pretrained best score is {:.5}'.format(self.best_score))
else:
self.best_score = 0
self.best_epoch = -1
def train_epoch(self, model, loader):
tqdm_loader = tqdm(loader)
current_loss_mean = 0
for batch_idx, (imgs, labels) in enumerate(tqdm_loader):
loss, predicted = self.batch_train(model, imgs, labels, batch_idx)
            # running (sliding) average of the loss so far
current_loss_mean = (current_loss_mean * batch_idx + loss) / (batch_idx + 1)
tqdm_loader.set_description('loss: {:.4} lr:{:.6}'.format(
current_loss_mean, self.optimizer.param_groups[0]['lr']))
return current_loss_mean
def batch_train(self, model, batch_imgs, batch_labels, batch_idx):
batch_imgs, batch_labels = batch_imgs.to(self.device), batch_labels.to(self.device)
predicted = model(batch_imgs)
loss = self.loss_fn(predicted, batch_labels)
loss.backward()
if batch_idx % self.grad_accum == self.grad_accum - 1:
clip_grad_norm_(model.parameters(), self.grad_clip)
self.optimizer.step()
self.optimizer.zero_grad()
return loss.item(), predicted
def valid_epoch(self, model, loader, local_metric_fn):
tqdm_loader = tqdm(loader)
current_score_mean = 0
eval_list = []
for batch_idx, (imgs, labels) in enumerate(tqdm_loader):
with torch.no_grad():
predicted = self.batch_valid(model, imgs)
labels = labels.numpy()
eval_list.append((predicted, labels))
score = local_metric_fn(predicted, labels)
current_score_mean = (current_score_mean * batch_idx + score) / (batch_idx + 1)
tqdm_loader.set_description('score: {:.5}'.format(current_score_mean))
return eval_list, current_score_mean
def batch_valid(self, model, batch_imgs):
batch_imgs = batch_imgs.to(self.device)
predicted = model(batch_imgs)
predicted = torch.sigmoid(predicted)
return predicted.cpu().numpy()
def process_summary(self, eval_list, epoch, global_metric_fn):
self.logger.info('{} epoch: \t start searching thresholds....'.format(epoch))
selected_score, (top_thr, area_thr, bot_thr) = global_metric_fn(eval_list)
epoch_summary = pd.DataFrame(
data=[[epoch, top_thr, area_thr, bot_thr, selected_score]],
            columns=['epoch', 'best_top_thr', 'best_area_thr', 'best_bot_thr', 'best_metric']
)
self.logger.info('{} epoch: \t Best triplets: {:.2}, {}, {:.2}'.format(epoch, top_thr, area_thr, bot_thr))
self.logger.info('{} epoch: \t Calculated score: {:.6}'.format(epoch, selected_score))
if not self.summary_file.is_file():
epoch_summary.to_csv(self.summary_file, index=False)
else:
            summary = pd.read_csv(self.summary_file)
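            # Assumed continuation (the source is truncated here): append the new epoch row to
            # the existing summary, write it back, and hand the score to the caller so it can
            # decide on checkpointing.
            summary = summary.append(epoch_summary)
            summary.to_csv(self.summary_file, index=False)
        return selected_score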
import os
import copy
import glob
import h5py
import numpy as np
from matplotlib import pylab as plt
import pandas as pd
#import ebf
import astropy.units as units
from astropy.coordinates import SkyCoord
try:
from dustmaps.bayestar import BayestarWebQuery
except:
try:
from dustmaps.dustmaps.bayestar import BayestarWebQuery
except:
print("FAILED TO IMPORT BayestarWebQuery")
import mwdust
import pdb
from .direct import classify as classify_direct
from .grid import classify as classify_grid
from isoclassify import DATADIR
#DATADIR="/Users/hosborn/.isoclassify"
CONSTRAINTS = [
'teff','logg','feh','lum','gmag','rmag','imag','zmag','jmag','hmag','kmag',
'gamag','bpmag','rpmag','parallax', 'bmag','vmag', 'btmag','vtmag','numax','dnu'
]
COORDS = ['ra','dec']
DATADIR = "/Users/hosborn/.isoclassify"
def run(**kw):
if kw['method']=='direct':
pipe = PipelineDirect(**kw)
elif kw['method']=='grid':
pipe = PipelineGrid(**kw)
else:
assert False, "method {} not supported ".format(kw['method'])
pipe.run()
if pipe.plotmode=='show':
plt.ion()
plt.show()
input('[press return to continue]:')
elif pipe.plotmode.count('save')==1:
pipe.savefig()
pipe.to_csv()
def query_dustmodel_coords(ra,dec):
reddenMap = BayestarWebQuery(version='bayestar2017')
sightLines = SkyCoord(ra*units.deg,dec*units.deg,frame='icrs')
reddenContainer = reddenMap(sightLines,mode='best')
del reddenMap # To clear reddenMap from memory
distanceSamples = np.array([0.06309573,0.07943284,0.1,0.12589255,0.15848933,0.19952627,0.25118864,0.31622776,0.3981072,0.50118726,0.6309574,0.7943282 ,1.,1.2589258,1.5848933,1.9952621,2.511887,3.1622777,3.981073,5.011873,6.3095727,7.943284,10.,12.589258,15.848933,19.952621,25.11887,31.622776,39.81073,50.11873,63.095726])*1000. # In pc, from bayestar2017 map distance samples
dustModelDF = pd.DataFrame({'ra': [ra], 'dec': [dec]})
for index in range(len(reddenContainer)):
dustModelDF['av_'+str(round(distanceSamples[index],6))] = reddenContainer[index]
return dustModelDF
def query_dustmodel_coords_allsky(ra,dec):
reddenMap = mwdust.Combined15()
sightLines = SkyCoord(ra*units.deg,dec*units.deg,frame='galactic')
distanceSamples = np.array([0.06309573,0.07943284,0.1,0.12589255,0.15848933,0.19952627,0.25118864,0.31622776,0.3981072,0.50118726,0.6309574,0.7943282 ,1.,1.2589258,1.5848933,1.9952621,2.511887,3.1622777,3.981073,5.011873,6.3095727,7.943284,10.,12.589258,15.848933,19.952621,25.11887,31.622776,39.81073,50.11873,63.095726])*1000. # In pc, from bayestar2017 map distance samples
reddenContainer=reddenMap(sightLines.l.value,sightLines.b.value,distanceSamples/1000.)
del reddenMap # To clear reddenMap from memory
    dustModelDF = pd.DataFrame({'ra': [ra], 'dec': [dec]})
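    # Assumed continuation, mirroring query_dustmodel_coords above: one extinction column per
    # distance sample, then return the single-row DataFrame.
    for index in range(len(reddenContainer)):
        dustModelDF['av_'+str(round(distanceSamples[index],6))] = reddenContainer[index]
    return dustModelDF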
import requests
import pandas as pd
import html
from bs4 import BeautifulSoup
class DblpApi:
def __init__(self):
self.session = requests.Session()
self.author_url = 'http://dblp.org/search/author/api'
self.pub_url = 'http://dblp.org/search/publ/api'
def get_pub_list_by_url(self, url):
req = self.session.get(url)
soup = BeautifulSoup(req.content, 'html.parser')
pub_list = [article.get_text() for article in soup.select('span[class="title"]')]
return pub_list
def search_pub(self, pub_name):
params = {
'q': pub_name,
'format': 'json',
'h': 1000,
}
req = self.session.get(self.pub_url, params=params)
data = req.json()
print(data['result'])
def search_author(self, author_input):
# prepare the first query
params = {
'q': author_input,
'format': 'json',
'h': 1000,
}
req = self.session.get(self.author_url, params=params)
data = req.json()
if data['result']['status']['@code'] == '200':
# split the input author name
author_input_list = author_input.split(' ')
author_input_length = len(author_input_list)
# if the first query got no result and the name is ended with a identifier, remove the identifier and retry
if data['result']['hits']['@total'] == '0' and author_input_list[-1].isdigit():
author_input_length -= 1
author_input_list = author_input_list[:author_input_length]
params = {
'q': ' '.join(author_input_list),
'format': 'json',
'h': 1000,
}
req = self.session.get(self.author_url, params=params)
data = req.json()
author_identical = []
curr_counter = data['result']['hits']['@sent']
while True:
# iterate through all result
for author in data['result']['hits']['hit']:
author_info = author['info']
unescaped_name = html.unescape(author_info['author'])
author_name_list = unescaped_name.split(' ')
found = False
# the case that the two names match exactly.
if author_input_list == author_name_list:
# author_identical.append((author_info['author'], author['@id'], author_info['url']))
found = True
                    # it's a duplicate name: the exact name followed by a four-digit identifier.
elif author_input_length + 1 == len(author_name_list) and author_name_list[-1].isdigit():
if author_input_list == author_name_list[:author_input_length]:
# author_identical.append((author_info['author'], author['@id'], author_info['url']))
found = True
# # middle name case, doesn't work for Chinese names.
# elif len(author_name_list) == 3 and author_input_length == 2 and not author_name_list[-1].isdigit():
# if author_name_list[0] == author_name_list[0] and author_name_list[2] == author_name_list[2]:
# found = True
if found:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
# the case that the author has name aliases
elif not found and 'aliases' in author_info:
alias = author_info['aliases']['alias']
# the author has one alias, and it matches the name exactly
if isinstance(alias, str) and html.unescape(alias) == author_input:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
# the author has a list of aliases
elif isinstance(alias, list):
for a in alias:
a_list = html.unescape(a).split(' ')
if a_list == author_input_list:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
elif author_input_length + 1 == len(a_list) and a_list[-1].isdigit():
if author_input_list == a_list[:author_input_length]:
author_identical.append(
(author_info['author'], author['@id'], author_info['url'], author_input))
if curr_counter < data['result']['hits']['@total']:
params = {
'q': author_input,
'format': 'json',
'h': 1000,
'f': curr_counter,
}
req = self.session.get(self.author_url, params=params)
data = req.json()
if data['result']['hits']['@sent'] == '0':
break
curr_counter += data['result']['hits']['@sent']
else:
break
return author_identical
if __name__ == '__main__':
dblp = DblpApi()
df_authors = pd.read_pickle('authors.pkl')
# df_bad = pd.read_pickle('bad.pkl')
#
# df_bad.rename({0: 'author'}, inplace=True, axis=1)
#
# counter = 0
# for row in df_bad.iterrows():
# # print(df_authors[df_authors['author'] == row[1]['author']])
# x = dblp.search_author(row[1]['author'])
# if len(x) == 0:
# counter += 1
# print(df_authors[df_authors['author'] == row[1]['author']])
#
# print(counter)
# df_article = pd.read_pickle('dblp_article_multi_author.pkl')
# print(df_article['title'].iloc[0])
# dblp.search_pub('Object Data Model Facilities for Multimedia Data Types.')
# print(df_authors['author'][15623])
# n = '<NAME>'
#
# x = dblp.search_author(n)
#
# print(x)
#
# req = dblp.session.get(x[0][2])
#
# soup = BeautifulSoup(req.content, 'html.parser')
#
# for y in soup.select('span[class="title"]'):
# print(y.get_text())
#
# pub_author_map = [(article.get_text(), x[0][2]) for article in soup.select('span[class="title"]')]
#
# print(pub_author_map)
# for x in soup.select('#publ-section > div:nth-child(2) > div > ul'):
# print(x)
# for decades in soup.find_all('ul', {'class': 'publ-list'}):
# for decade in decades:
# for year in decade:
# print(year)
# print('----------------------------------')
# print('111111111')
upper_bound = 100000
author_found = []
author_not_found = []
author_bad_format = []
for name in df_authors['author'].tolist()[:upper_bound]:
try:
identical_authors = dblp.search_author(name)
if len(identical_authors) == 0:
author_not_found.append(name)
else:
author_found.extend(identical_authors)
except Exception:
print(f'name {name} does not work.')
author_bad_format.append(name)
    print(f'There are {len(author_not_found)} authors that could not be found.')
print(author_not_found)
    df_not_found = pd.DataFrame(author_not_found)
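    # Assumed continuation (not in the original source): persist the lookup results so later
    # runs can reuse them instead of re-querying the DBLP API. File and column names here are
    # illustrative.
    df_found = pd.DataFrame(author_found, columns=['author', 'id', 'url', 'query'])
    df_found.to_pickle('authors_found.pkl')
    df_not_found.to_pickle('authors_not_found.pkl')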
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
    df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
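    # Likely body (assumption -- the source is truncated here): supplying both on_index and
    # join_columns is contradictory and should raise; the match string is approximate.
    with raises(Exception, match="Only provide on_index or join_columns"):
        datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])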
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys, os
import re
import datetime as dt
import lxml.html
import requests
import itertools
import glob
import codecs
import html
from . import patterns
from . import fixes
from . import passage
flatten = lambda x: list(itertools.chain.from_iterable(x))
end_punctuation = [".", "!", "?", "-", "]", u"—", u"–", '"', ",", ":", ")", ";"]
not_header = ["Thank you", "We'll", "What's", "Item", "any questions", "involving", "dated"]
safe_speakers = [
'White House Press Secretary James "Jay" Carney',
'White House Press Secretary <NAME>. "Jay" Carney',
'White House Principal Deputy Press Secretary <NAME>',
'<NAME>, Secretary To The President',
'White House Press Secretary <NAME>'
]
class TranscriptSet(object):
def __init__(self, transcripts):
self.transcripts = transcripts
def to_csv(self, dest, **kwargs):
import pandas as pd
passages = flatten([ [ {
"primary_speakers": t.primary_speaker,
"date": t.date,
"location": t.location,
"start_time": t.start_time,
"end_time": t.end_time,
"speaker": (p.speaker or ""),
"question": p.is_question,
"text": (p.text or "")
} for p in t.passages ]
for t in self.transcripts ])
        df = pd.DataFrame(passages)
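        # Assumed continuation: write the flattened passages out to the requested destination.
        df.to_csv(dest, **kwargs)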
# TODO move away from this test generator style since we need to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
##############################################################
#                                                            #
#  THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py  #
#                                                            #
##############################################################
# TODO refactor this into table-driven tests using pytest.mark.parametrize since each test body follows the same structure
# and a single test body with multiple test table entries will be more readable and flexible.
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
class autogenerated_gb_tests(unittest.TestCase):
def safe_assert(self, ary1, ary2):
for a, b in zip(ary1, ary2):
if a == a and b == b:
self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
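    # Multikey groupby.agg(['var', 'max', 'sum', 'mean']): compare pandas DataFrame output
    # with rt.Dataset output across symbol ratios and value/key column counts.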
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
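    # Multikey groupby.agg(['max']): compare pandas DataFrame output with rt.Dataset output
    # across symbol ratios and value/key column counts.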
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(5, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(1, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(4, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(7, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(2, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(5, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(1, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(4, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(7, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(2, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(5, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
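    # Multikey groupby.agg(['min', 'sum']): compare pandas DataFrame output with rt.Dataset
    # output across symbol ratios and value/key column counts.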
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
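    # Multikey groupby.agg(['var', 'median', 'sum']): compare pandas DataFrame output with
    # rt.Dataset output across symbol ratios and value/key column counts.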
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
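    # Multikey groupby.agg(['max', 'median', 'mean', 'var']): compare pandas DataFrame output
    # with rt.Dataset output across symbol ratios and value/key column counts.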
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
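    # Multikey groupby.agg(['sum']): compare pandas DataFrame output with rt.Dataset output
    # across symbol ratios and value/key column counts.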
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(1, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(4, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(7, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(2, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(5, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(1, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(4, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(7, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(2, 1, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(5, 1, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(1, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(4, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(7, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(2, 3, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(5, 3, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
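    # Multikey groupby.agg(['min', 'max']): compare pandas DataFrame output with rt.Dataset
    # output across symbol ratios and value/key column counts.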
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(1, 1, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(4, 1, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(7, 1, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'max']
test_class = groupby_everything(2, 2, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'max']
test_class = groupby_everything(5, 2, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'max']
test_class = groupby_everything(1, 3, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'max']
test_class = groupby_everything(4, 3, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'max']
test_class = groupby_everything(7, 3, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(2, 1, 0.30, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(5, 1, 0.30, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'max']
test_class = groupby_everything(1, 2, 0.30, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'max']
test_class = groupby_everything(4, 2, 0.30, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'max']
test_class = groupby_everything(7, 2, 0.30, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'max']
test_class = groupby_everything(2, 3, 0.30, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
    def test_multikey___aggs_min_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
        aggs = ['min', 'max']
        test_class = groupby_everything(5, 3, 0.30, ['min', 'max'])
        pd_out = (
            pd.DataFrame(test_class.data)
            .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
            .agg(test_class.aggregation_functions)
        )
        sf_out = (
            rt.Dataset(test_class.data)
            .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
            .agg(test_class.aggregation_functions)
        )
        for func in aggs:
            for i in range(0, test_class.val_columns):
                column = VAL_COLUMN_NAMES[i]
                self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
import pandas as pd
#importing all the data from CSV files
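# The CSV files below use the Lahman Baseball Database layout (assumed);
# `usecols` keeps only the columns needed for the later feature building.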
master_df = pd.read_csv('People.csv', usecols=['playerID', 'nameFirst', 'nameLast', 'bats', 'throws', 'debut', 'finalGame'])
fielding_df = pd.read_csv('Fielding.csv',usecols=['playerID','yearID','stint','teamID','lgID','POS','G','GS','InnOuts','PO','A','E','DP'])
batting_df = pd.read_csv('Batting.csv')
awards_df = pd.read_csv('AwardsPlayers.csv', usecols=['playerID','awardID','yearID'])
allstar_df = pd.read_csv('AllstarFull.csv', usecols=['playerID','yearID'])
hof_df = pd.read_csv('HallOfFame.csv',usecols=['playerID','yearid','votedBy','needed_note','inducted','category'])
appearances_df = | pd.read_csv('Appearances.csv') | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
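    # 98 integers with two NaNs interspersed -> 100 values total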
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
        # 1 ** x is 1 even when x is NA, so unmask entries whose base is 1
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = | pd.array([-1, 0, 1, None, 2], dtype="Int64") | pandas.array |
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
import numpy as np
from data_factory.temperature_spider import getTemperatureData
def loadNTL(path):
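    """Load line-loss (NTL) records from a CSV file.

    The records are sorted by AreaID and Date, and the Date column is
    converted from a datetime into unix-epoch seconds.
    """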
lineloss = pd.read_csv(path)
lineloss['Date'] = pd.to_datetime(lineloss['Date'])
lineloss = lineloss.sort_values(['AreaID', 'Date'])
lineloss['Date'] = lineloss['Date'].astype(int)
lineloss['Date'] = (lineloss['Date'] / 1e9).astype(int)
return lineloss
def loadUser(path):
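    """Load user-level records.

    When `path` is a directory, every CSV file inside it is read and the
    results are concatenated into a single DataFrame.
    """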
if os.path.isdir(path):
files = os.listdir(path)
userdata = []
for file in files:
temp = pd.read_csv(os.path.join(path, file))
userdata.append(temp)
userdata = | pd.concat(userdata, ignore_index=True) | pandas.concat |
# Module: internal.ensemble
# Provides an Ensemble Forecaster supporting voting, mean and median methods.
# This is a reimplementation from Sktime original EnsembleForecaster.
# This Ensemble is only to be used internally.
import pandas as pd
import numpy as np
import warnings
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._meta import _HeterogenousEnsembleForecaster
_ENSEMBLE_METHODS = ["voting", "mean", "median"]
class _EnsembleForecasterWithVoting(_HeterogenousEnsembleForecaster):
"""
Ensemble of forecasters.
Parameters
----------
forecasters : list of (str, estimator) tuples
method : {'mean', 'median', 'voting'}, default='mean'
Specifies the ensemble method type to be used.
It must be one of 'mean', 'median', or 'voting.
If none is given, 'mean' will be used.
weights : array-like of shape (n_estimators,), default=None
A sequence of weights (`float` or `int`) to weight the occurrences of
predicted values before averaging. This parameter is only valid for
'voting' method, uses uniform weights for 'voting' method if None.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for fit. None means 1 unless
in a joblib.parallel_backend context.
-1 means using all processors.
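    Examples
    --------
    A minimal sketch; assumes sktime's ``NaiveForecaster`` and a univariate
    series ``y`` are available::

        ens = _EnsembleForecasterWithVoting(
            forecasters=[
                ("last", NaiveForecaster(strategy="last")),
                ("mean", NaiveForecaster(strategy="mean")),
            ],
            method="median",
        )
        ens.fit(y, fh=[1, 2, 3])
        y_pred = ens.predict(fh=[1, 2, 3])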
"""
_required_parameters = ["forecasters"]
_not_required_weights = ["mean", "median"]
_required_weights = ["voting", "mean"]
_available_methods = ["voting", "mean", "median"]
def __init__(self, forecasters, method="mean", weights=None, n_jobs=None):
self.forecasters = forecasters
self.method = method
self.weights = weights
super(_EnsembleForecasterWithVoting, self).__init__(
forecasters=self.forecasters, n_jobs=n_jobs
)
@property
def weights(self):
return self._weights
@weights.setter
def weights(self, value):
self._weights = value
def _check_method(self):
if self.method == "voting" and self.weights is None:
warnings.warn("Missing 'weights' argument, setting uniform weights.")
self.weights = np.ones(len(self.forecasters))
elif self.method in self._not_required_weights and self.weights:
            warnings.warn(
                "Ignoring 'weights' argument: weights are not used when "
                "method='mean' or method='median'. Setting weights to `None`."
            )
self.weights = None
elif self.method not in self._available_methods:
raise ValueError(
f"Method {self.method} is not supported. Available methods are {', '.join(self._available_methods)}"
)
def _check_weights(self):
if self.weights is not None and len(self.weights) != len(self.forecasters):
raise ValueError(
f"Number of forecasters and weights must be equal, got {len(self.weights)} weights and {len(self.estimators)} estimators"
)
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecasters horizon with the steps ahead to to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
names, forecasters = self._check_forecasters()
self._fit_forecasters(forecasters, y, X, fh)
return self
def update(self, y, X=None, update_params=True):
"""Update fitted parameters
Parameters
----------
y : pd.Series
X : pd.DataFrame
update_params : bool, optional (default=True)
Returns
-------
self : an instance of self
"""
for forecaster in self.forecasters_:
forecaster.update(y, X, update_params=update_params)
return self
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
if return_pred_int:
raise NotImplementedError()
self._check_method()
pred_forecasters = pd.concat(self._predict_forecasters(fh, X), axis=1)
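        # one column per fitted forecaster, indexed by the forecasting horizon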
if self.method == "median":
return pd.Series(pred_forecasters.median(axis=1))
elif self.method in self._required_weights:
self._check_weights()
pred_w = np.average(pred_forecasters, axis=1, weights=self.weights)
return | pd.Series(pred_w, index=pred_forecasters.index) | pandas.Series |
from os import makedirs, path
from typing import Union
import pandas as pd
from .filetype import FileType
class DataReader(object):
def __init__(self):
"""
Stores all dataframes and provides methods to feed data into the dataframes.
"""
self.bus_lines = pd.DataFrame(columns=['id', 'name', 'color', 'card_only', 'category'])
self.bus_line_shapes = pd.DataFrame(columns=['id', 'bus_line_id', 'latitude', 'longitude'])
self.bus_stops = pd.DataFrame(columns=['number', 'name', 'type', 'latitude', 'longitude'])
self.itineraries = pd.DataFrame(columns=['id', 'bus_line_id', 'direction'])
self.itinerary_stops = pd.DataFrame(columns=['itinerary_id', 'sequence_number', 'stop_number'])
self.bus_lines_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'day_type',
'time', 'adaptive'])
self.vehicles_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'vehicle_id',
'time'])
self.itinerary_stops_extra = pd.DataFrame(columns=['itinerary_id', 'itinerary_name', 'bus_line_id',
'itinerary_stop_id', 'stop_name', 'stop_name_short',
'stop_name_abbr', 'bus_stop_id', 'sequence_number', 'type',
'special_stop'])
self.itinerary_distances = pd.DataFrame(columns=['itinerary_stop_id', 'itinerary_next_stop_id', 'distance_m'])
self.companies = pd.DataFrame(columns=['id', 'name'])
self.itinerary_stops_companies = pd.DataFrame(columns=['itinerary_stop_id', 'company_id'])
self.vehicle_log = pd.DataFrame(columns=['timestamp', 'vehicle_id', 'bus_line_id', 'latitude', 'longitude'])
self.points_of_interest = pd.DataFrame(columns=['name', 'description', 'category', 'latitude', 'longitude'])
def feed_data(self, file: Union[bytes, str], data_type: FileType):
"""
Feeds data into the reader's internal dataframes.
:param file: File which contains the data.
If a *bytes* object is provided, the object will be interpreted as the actual decompressed content of the file.
Alternatively, if a *str* object is provided, the object will be interpreted as the path to a file in the user's
operating system. Supports the same compression types supported by pandas.
:param data_type: Type of data. See :class:`FileType` for available types
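        Example (sketch; ``linhas.json`` is a hypothetical file name)::

            reader = DataReader()
            reader.feed_data('linhas.json', FileType.LINHAS)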
"""
# User provided raw binary data or file path (both are supported by pandas)
if isinstance(file, bytes) or isinstance(file, str):
# pd.read_json can take a long time. Therefore, we only read the file if the data_type parameter is valid.
if data_type == FileType.LINHAS:
file_data = pd.read_json(file)
self._feed_linhas_json(file_data)
elif data_type == FileType.POIS:
file_data = | pd.read_json(file) | pandas.read_json |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | -------------- -| ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
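        Examples
        --------
        >>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
        >>> idx.where(idx.isin(['car', 'train']), 'other')
        Index(['car', 'other', 'train', 'other'], dtype='object')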
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
            # this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
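        Examples
        --------
        >>> idx = pd.Index(['a', 'b', 'c'])
        >>> idx.to_series()
        a    a
        b    b
        c    c
        dtype: object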
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
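        Examples
        --------
        >>> pd.Index([1, 2, 3]).astype('float64')
        Float64Index([1.0, 2.0, 3.0], dtype='float64')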
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
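# Illustrative note (not part of the original pandas source): for a single-level
# index, rename is a thin wrapper around set_names. For example,
# Index([1, 2, 3], name='x').rename('y') returns
# Int64Index([1, 2, 3], dtype='int64', name='y'), while rename('y', inplace=True)
# renames the existing index in place and returns None.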
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = | com._asarray_tuplesafe(keyarr) | pandas.core.common._asarray_tuplesafe |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 14:48:57 2021
@author: <NAME>
"""
import pandas as pd, numpy as np, os, igraph as ig, leidenalg as la
import cvxpy as cp
from sklearn.neighbors import NearestNeighbors, radius_neighbors_graph
from kneed import KneeLocator
from sklearn.utils.validation import check_symmetric
from scipy.sparse import csr_matrix
from matplotlib import pyplot as plt
from sklearn.neighbors import kneighbors_graph
from Bipartite_Ensembling import BGPA
def read_in_data(directory_names, years):
data = {}
for year in years:
data_modes=[]
for directory in directory_names:
for filename in os.listdir(os.path.join('C:\\Users\\Gian Maria\\Desktop\\Unitn\\Iain\\CORRECT_DATA\\Data', directory)):
if year in filename:
datum = pd.read_csv(os.path.join('C:\\Users\\Gian Maria\\Desktop\\Unitn\\Iain\\CORRECT_DATA\\Data',directory, filename), index_col=0)
datum.fillna(value=0, inplace=True)
data_modes.append(datum)
data_modes_index = np.unique(np.concatenate([mode.index for mode in data_modes]))
data_modes = [mode.reindex(data_modes_index) for mode in data_modes]
data_modes = [mode.fillna(value=0) for mode in data_modes]
data[year] = data_modes.copy()
return data
class Leiden_Unimodal:
def __init__(self, obj_type='RB_Mod', resolution=1.0, n_iterations =-1):
obj_types = {'CPM': la.CPMVertexPartition,
'RBER': la.RBERVertexPartition,
'RB_Mod': la.RBConfigurationVertexPartition,
'Mod': la.ModularityVertexPartition,
'Surprise': la.SurpriseVertexPartition
}
self.obj_type = obj_type
self.obj_func = obj_types[obj_type]
self.resolution = resolution
self.n_iterations = n_iterations
def fit_transform(self, graph):
if type(graph) is ig.Graph:
G =graph
else:
G = self._scipy_to_igraph(graph)
if self.obj_type in ['CPM', 'RBER', 'RB_Mod']:
partition = la.find_partition(G, self.obj_func, n_iterations=self.n_iterations,
resolution_parameter=self.resolution)
else:
partition = la.find_partition(G, self.obj_func, n_iterations=self.n_iterations)
self.modularity_ = partition.quality()
self.labels_ = np.array(partition.membership)
return self.labels_
def _scipy_to_igraph(self, matrix):
# matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = matrix[sources, targets]
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
return graph
class Leiden_Multiplex:
def __init__(self, obj_types=None, resolutions=None, modal_weights=None, n_iterations=-1):
self.obj_types = obj_types
self.resolutions = resolutions
self.modal_weights = modal_weights
self.n_iterations = n_iterations
def fit_transform(self, graphs):
obj_table = {'CPM': la.CPMVertexPartition,
'RBER': la.RBERVertexPartition,
'RB_Mod': la.RBConfigurationVertexPartition,
'Mod': la.ModularityVertexPartition,
'Surprise': la.SurpriseVertexPartition
}
G=[]
for graph in graphs:
if type(graph) is ig.Graph:
G.append(graph)
else:
G.append(self._scipy_to_igraph(graph))
optimiser = la.Optimiser()
partitions = []
for i in range(len(G)):
if self.obj_types is None:
partitions.append(la.RBConfigurationVertexPartition(G[i], resolution_parameter=1.0))
elif self.resolutions is None:
obj = obj_table[self.obj_types[i]]
partitions.append(obj(G[i]))
else:
obj = obj_table[self.obj_types[i]]
partitions.append(obj(G[i], resolution_parameter=self.resolutions[i]))
if self.modal_weights is None:
diff = optimiser.optimise_partition_multiplex(partitions, n_iterations=self.n_iterations)
else:
diff = optimiser.optimise_partition_multiplex(partitions, layer_weights = self.modal_weights, n_iterations=self.n_iterations)
self.modularities = [part.modularity for part in partitions]
self.labels_ = np.array(partitions[0].membership)
return self.labels_
def _scipy_to_igraph(self, matrix):
matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = matrix[sources, targets]
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
return graph
class MVMC:
def __init__(self, n_iterations=-1, max_clusterings=20,
resolution_tol=1e-2, weight_tol=1e-2, verbose=False):
self.n_iterations = n_iterations
self.max_clusterings = max_clusterings
self.resolution_tol = resolution_tol
self.weight_tol = weight_tol
self.verbose = verbose
def fit_transform(self, graphs):
G=[]
for graph in graphs:
if type(graph) is ig.Graph:
G.append(graph)
else:
G.append(self._scipy_to_igraph(graph))
if self.verbose:
for i in range(len(G)):
print("View Graph {}: num_nodes: {}, num_edges: {}, directed: {}, num_components: {}, num_isolates: {}"
.format(i, G[i].vcount(), G[i].ecount(), G[i].is_directed(),
len(G[i].components(mode='WEAK').sizes()), G[i].components(mode='WEAK').sizes().count(1)))
self.weights = []
self.resolutions =[]
self.best_modularity =-np.inf
self.best_clustering = None
self.best_resolutions = None
self.best_weights = None
self.modularities =[]
self.clusterings =[]
self.final_iteration = 0
self.best_iteration = 0
weights = [1]*len(G)
resolutions =[1]*len(G)
for iterate in range(self.max_clusterings):
partitions = []
for i in range(len(G)):
partitions.append(la.RBConfigurationVertexPartition(G[i], resolution_parameter=resolutions[i]))
optimiser = la.Optimiser()
diff = optimiser.optimise_partition_multiplex(partitions, layer_weights = weights, n_iterations=self.n_iterations)
self.clusterings.append(np.array(partitions[0].membership))
self.modularities.append([part.quality()/(part.graph.ecount() if part.graph.is_directed() else 2*part.graph.ecount())
for part in partitions])
self.weights.append(weights.copy())
self.resolutions.append(resolutions.copy())
self.final_iteration +=1
if self.verbose:
print("--------")
print("Iteration: {} \n Modularities: {} \n Resolutions: {} \n Weights: {}"
.format(self.final_iteration, self.modularities[-1], resolutions, weights))
# if np.sum(np.array(self.weights[-1]) * np.array(self.modularities[-1])) > self.best_modularity:
self.best_clustering = self.clusterings[-1]
self.best_modularity = np.sum(np.array(self.weights[-1]) * np.array(self.modularities[-1]))
self.best_resolutions = self.resolutions[-1]
self.best_weights = self.weights[-1]
self.best_iteration = self.final_iteration
theta_in, theta_out = self._calculate_edge_probabilities(G)
for i in range(len(G)):
resolutions[i] = (theta_in[i] - theta_out[i])/ (np.log(theta_in[i]) - np.log(theta_out[i]))
weights[i] = (np.log(theta_in[i]) - np.log(theta_out[i]))/(np.mean([np.log(theta_in[j]) - np.log(theta_out[j]) for j in range(len(G))]))
if (np.all(np.abs(np.array(self.resolutions[-1])-np.array(resolutions)) <= self.resolution_tol)
and np.all(np.abs(np.array(self.weights[-1])-np.array(weights)) <= self.resolution_tol)):
break
else:
best_iteration = np.argmax([np.sum(np.array(self.weights[i]) * np.array(self.modularities[i]))
for i in range(len(self.modularities))])
self.best_clustering = self.clusterings[best_iteration]
self.best_modularity = np.sum(np.array(self.weights[best_iteration]) * np.array(self.modularities[best_iteration]))
self.best_resolutions = self.resolutions[best_iteration]
self.best_weights = self.weights[best_iteration]
self.best_iteration = best_iteration
if self.verbose:
print("MVMC did not converge, best result found: Iteration: {}, Modularity: {}, Resolutions: {}, Weights: {}"
.format(self.best_iteration, self.best_modularity, self.best_resolutions, self.best_weights))
return self.best_clustering
def _scipy_to_igraph(self, matrix):
matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = list(matrix.data)
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
if not graph.is_weighted():
graph.es['weight'] = [1.0] * graph.ecount()
return graph
def _calculate_edge_probabilities(self, G):
theta_in =[]
theta_out =[]
clusters = self.clusterings[-1].copy()
for i in range(len(G)):
m_in = 0
m = sum(e['weight'] for e in G[i].es)
kappa =[]
G[i].vs['clusters'] = clusters
for cluster in np.unique(clusters):
nodes = G[i].vs.select(clusters_eq=cluster)
m_in += sum(e['weight'] for e in G[i].subgraph(nodes).es)
if G[i].is_directed():
degree_products = np.outer(np.array(G[i].strength(nodes, mode = 'IN', weights='weight')),
np.array(G[i].strength(nodes, mode = 'OUT', weights='weight')))
np.fill_diagonal(degree_products,0)
kappa.append(np.sum(degree_products, dtype=np.int64))
else:
kappa.append(np.sum(np.array(G[i].strength(nodes, weights='weight')), dtype=np.int64)**2)
if G[i].is_directed():
if m_in <=0:
# Case when there are no internal edges; every node in its own cluster
theta_in.append(1/G[i].ecount())
else:
theta_in.append((m_in)/(np.sum(kappa, dtype=np.int64)/(2*m)))
if m-m_in <=0:
# Case when all edges are internal; 1 cluster or a bunch of disconnected clusters
theta_out.append(1/G[i].ecount())
else:
theta_out.append((m-m_in)/(m-np.sum(kappa, dtype=np.int64)/(2*m)))
else:
if m_in <=0:
# Case when there are no internal edges; every node in its own cluster
theta_in.append(1/G[i].ecount())
else:
theta_in.append((m_in)/(np.sum(kappa, dtype=np.int64)/(4*m)))
if m-m_in <=0:
# Case when all edges are internal; 1 cluster or a bunch of disconnected clusters
theta_out.append(1/G[i].ecount())
else:
theta_out.append((m-m_in)/(m-np.sum(kappa, dtype=np.int64)/(4*m)))
return theta_in, theta_out
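# --- Usage sketch (added for illustration, not part of the original script) ---
# A minimal, hedged example of running MVMC on two synthetic feature views.
# The random data, view shapes and parameter values below are assumptions made
# purely for demonstration and are not taken from this project.
def _mvmc_usage_sketch():
    rng = np.random.default_rng(0)
    views = [pd.DataFrame(rng.random((60, 8))), pd.DataFrame(rng.random((60, 4)))]
    # Build one k-NN similarity graph per view (helper defined later in this module).
    graphs = create_nearest_neighbors_graph(views, metric='cosine')
    clusterer = MVMC(resolution_tol=1e-2, weight_tol=1e-2, max_clusterings=10)
    labels = clusterer.fit_transform(graphs)
    return labels, clusterer.best_resolutions, clusterer.best_weights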
def create_neighbors_plot(list_of_dfs, metric='cosine'):
fig = plt.figure(figsize=(20, 10))
fig.subplots_adjust(hspace=.5, wspace=.2)
i = 1
for df in list_of_dfs:
X = df.iloc[:,2:].values
n_neighbors = int(np.ceil(np.log2(X.shape[0])))
nearest_neighbors = NearestNeighbors(n_neighbors=n_neighbors, metric=metric, n_jobs=-1)
neighbors = nearest_neighbors.fit(X)
distances, indices = neighbors.kneighbors(X)
distances = np.sort(distances[:,n_neighbors-1], axis=0)
d = np.arange(len(distances))
knee = KneeLocator(d, distances, S=1, curve='convex', direction='increasing', interp_method='polynomial')
#ax = fig.add_subplot(2, 5, i)
#ax.plot(distances)
print("knee value: {}".format(distances[knee.knee]))
knee.plot_knee()
#ax.set_xlabel("Points")
#ax.set_ylabel("Distance")
i +=1
def create_nearest_neighbors_graph(list_of_dfs, metric='cosine'):
graphs = []
for df in list_of_dfs:
X = df.values
'''Row normalize the data'''
#X = normalize(X, axis=1, norm='l1')
'''Create a k-nearest neighbors graph'''
n_neighbors = int(np.ceil(np.log2(X.shape[0])))
graph = kneighbors_graph(X, n_neighbors=n_neighbors, metric=metric,
mode='distance')
'''converting to similarity and limit to only edges where there is overlap in the
feature space'''
graph.data = 1-graph.data
graph.eliminate_zeros()
graph.data = (graph.data - np.min(graph.data)) / (np.max(graph.data) - np.min(graph.data))
'''symmetrizing the graphs'''
#graph = 0.5 * (graph + graph.T)
#graph = graph.minimum(graph.T)
#graph = graph.maximum(graph.T)
#graph.eliminate_zeros()
graphs.append(graph)
return graphs
def create_radius_ball_graph(list_of_dfs, metric='euclidean'):
graphs = []
for df in list_of_dfs:
#X = df.iloc[:,2:].values
X = df.values
'''Row normalize the data'''
#X = normalize(X, axis=1, norm='l1')
'''Create radius ball graph'''
n_neighbors = int(np.ceil(np.log2(X.shape[0])))
nearest_neighbors = NearestNeighbors(n_neighbors=n_neighbors, metric=metric, n_jobs=-1)
neighbors = nearest_neighbors.fit(X)
distances, indices = neighbors.kneighbors(X)
distances = np.sort(distances[:,n_neighbors-1], axis=0)
d = np.arange(len(distances))
knee = KneeLocator(d, distances, S=1, curve='convex', direction='increasing', interp_method='polynomial')
graph = radius_neighbors_graph(X, radius = distances[knee.knee], metric=metric, mode='distance')
'''converting to similarity and limit to only edges where there is overlap in the
feature space'''
graph.data = np.around(np.exp(-0.5 * graph.data / np.std(graph.data)), decimals=4)
#graph.data = 1-graph.data
#graph.eliminate_zeros()
#graph.data = (graph.data - np.min(graph.data)) / (np.max(graph.data) - np.min(graph.data))
'''symmetrizing the graphs'''
#graph = 0.5 * (graph + graph.T)
#graph = graph.minimum(graph.T)
#graph = graph.maximum(graph.T)
#graph.eliminate_zeros()
graphs.append(graph)
return graphs
def create_lrr_sparse_graph(list_of_dfs):
graphs = []
for df in list_of_dfs:
X = df.values
n = X.shape[0]
m = X.shape[1]
W = cp.Variable(shape=(n,n))
obj = cp.Minimize(cp.norm(cp.norm(W@X - X, p=2, axis=0), p=1)+ 100*cp.norm(W, p=1))
constraint = [cp.diag(W)==0]
prob = cp.Problem(obj, constraint)
optimal_value = prob.solve()
graph = np.round((np.abs(W.value) + np.transpose(np.abs(W.value)))/2, 2)
graphs.append(csr_matrix(graph))
return graphs
def pd_fill_diagonal(df_matrix, value=0):
mat = df_matrix.values
n = mat.shape[0]
mat[range(n), range(n)] = value
return pd.DataFrame(mat)
def projected_graph(list_of_dfs):
proj_graphs = []
for df in list_of_dfs:
df = df.dot(df.T)
pd_fill_diagonal(df, value=0)
df.fillna(0, inplace=True)
df=df.div(df.sum(axis=1),axis=0)
df = 0.5*(df+df.T)
graph = csr_matrix(df.values)
graph.data[np.isnan(graph.data)] = 0.0
proj_graphs.append(graph)
return proj_graphs
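# Note (added for clarity): projected_graph collapses a bipartite incidence matrix
# onto its row entities via df.dot(df.T), zeroes the diagonal so there are no
# self-links, row-normalises the weights, symmetrises the result with 0.5*(W + W.T)
# and stores it as a sparse CSR matrix.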
def scipy_to_igraph(matrix):
matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = list(matrix.data)
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
if not graph.is_weighted():
graph.es['weight'] = [1.0] * graph.ecount()
return graph
def get_graph_stats(yearly_networks, view_names):
for year in yearly_networks.keys():
for i in range(len(view_names)):
datum ={}
datum['Key'] = year+"_"+view_names[i]
datum['Year'] = year
datum['View'] = view_names[i]
G = scipy_to_igraph(yearly_networks[year][i])
datum['Num_Nodes'] = G.vcount()
datum['Num_Edges'] = G.ecount()
datum['Density'] = G.density()
datum['Num_Components'] = len(G.components(mode='WEAK').sizes())
datum['Num_Isolates'] = G.components(mode='WEAK').sizes().count(1)
datum['Clustering_Coefficient'] = G.transitivity_undirected(mode="zero")
datum['Average Path Length'] = G.average_path_length(directed=False)
datum['Avg Neighbors'] = G.knn(vids=None)
datum['Assortativity'] = G.assortativity_degree(directed = False)
yield datum
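# Illustrative sketch (assumed variable names, added for demonstration): collect the
# generator output of get_graph_stats into a single per-year, per-view summary table.
def _graph_stats_table_sketch(yearly_networks, view_names):
    rows = list(get_graph_stats(yearly_networks, view_names))
    return pd.DataFrame(rows).set_index('Key')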
def find_multi_view_clusters(names, graphs, view_names, num_clusterings=10):
ensembler = BGPA()
modularities = []
resolutions =[]
weights = []
iterations = []
clusterings = []
mvmc_clstr= MVMC(resolution_tol=0.01, weight_tol=0.01, max_clusterings=40)
for _ in range(num_clusterings):
community_labels = mvmc_clstr.fit_transform(graphs)
clusterings.append(community_labels)
modularities.append(mvmc_clstr.modularities[-1])
resolutions.append(mvmc_clstr.resolutions[-1])
weights.append(mvmc_clstr.weights[-1])
iterations.append(mvmc_clstr.final_iteration)
performance_results ={}
performance_results['view_names'] = view_names
performance_results['modularity'] = np.average(np.array(modularities))
performance_results['resolution'] = np.average(np.array(resolutions), axis=0)
performance_results['weights'] = np.average(np.array(weights), axis=0)
performance_results['iterations'] = np.average(np.array(iterations))
return pd.DataFrame(index = names, data = ensembler.fit_predict(clusterings)), | pd.DataFrame(performance_results) | pandas.DataFrame |
import copy
import warnings
import pprint
import numpy as np
import pandas as pd
from chemml.wrapper.database import sklearn_db
from chemml.wrapper.database import chemml_db
from chemml.wrapper.database import pandas_db
# todo: decorate some of the steps in the wrappers, e.g. sending out outputs by finding all the connected edges in the graph
# todo: use Input and Output classes to handle inputs and outputs
class BASE(object):
"""
Do not instantiate this class
"""
def __init__(self, Base, parameters, iblock, Task, Function, Host):
self.Base = Base
self.parameters = parameters
self.iblock = iblock
self.Task = Task
self.Function = Function
self.Host = Host
def run(self):
self.IO()
self.Receive()
self.fit()
def IO(self):
if self.Host == 'sklearn':
self.metadata = getattr(sklearn_db, self.Function)()
elif self.Host == 'chemml':
self.metadata = getattr(chemml_db, self.Function)()
elif self.Host == 'pandas':
self.metadata = getattr(pandas_db, self.Function)()
self.inputs = {i:copy.deepcopy(vars(self.metadata.Inputs)[i]) for i in vars(self.metadata.Inputs).keys() if
i not in ('__dict__','__weakref__','__module__', '__doc__')}
self.outputs = {i:copy.deepcopy(vars(self.metadata.Outputs)[i]) for i in vars(self.metadata.Outputs).keys() if
i not in ('__dict__','__weakref__','__module__', '__doc__')}
# self.wparams = {i:copy.deepcopy(vars(self.metadata.WParameters)[i]) for i in vars(self.metadata.WParameters).keys() if
# i not in ('__module__', '__doc__')}
def Receive(self):
recv = [edge for edge in self.Base.graph if edge[2] == self.iblock]
# print(recv)
# print("self.inputs: ", self.inputs)
# print("self.Base.graph: ", self.Base.graph)
# print("self.iblock: ", self.iblock)
self.Base.graph = tuple([edge for edge in self.Base.graph if edge[2] != self.iblock])
# check received tokens to: (1) be a legal input, and (2) be unique.
count = {token: 0 for token in self.inputs}
for edge in recv:
if edge[3] in self.inputs:
count[edge[3]] += 1
if count[edge[3]] > 1:
msg = '@Task #%i(%s): only one input per each available input token can be received.' % (
self.iblock + 1, self.Task)
raise IOError(msg)
else:
msg = "@Task #%i(%s): received a non valid input token '%s', sent by block #%i" % (
self.iblock + 1, self.Task, edge[3], edge[0] + 1)
raise IOError(msg)
# print("recv: ", recv)
for edge in recv:
# print("edge: ", edge)
key = edge[0:2]
# print("self.Base.send: ", self.Base.send)
if key in self.Base.send:
if self.Base.send[key].count > 0:
value = self.Base.send[key].value
# Todo: add an option to deepcopy(value)
# print("type(value): ", type(value))
# print("value: ", value)
# print("input.types: ",self.inputs[edge[3]].types)
# print("We're here now!:", str(type(value)) in self.inputs[edge[3]].types)
# print("edge[3].value: ", edge[3].value)
# print("(type(edge[3].value): ", type(edge[3].value))
if str(type(value)) in self.inputs[edge[3]].types or \
len(self.inputs[edge[3]].types)==0:
self.inputs[edge[3]].value = value
self.inputs[edge[3]].fro = self.Base.send[key].fro
self.Base.send[key].count -= 1
else:
msg = "@Task #%i(%s): The input token '%s' doesn't support the received format" % (
self.iblock + 1, self.Task, edge[3])
raise IOError(msg)
if self.Base.send[key].count == 0:
del self.Base.send[key]
else:
msg = "@Task #%i(%s): no output has been sent to the input token '%s'" % (
self.iblock + 1, self.Task, edge[3])
raise IOError(msg)
def Send(self):
order = [edge[1] for edge in self.Base.graph if edge[0] == self.iblock]
for token in set(order):
if token in self.outputs:
if self.outputs[token].value is not None:
self.outputs[token].count = order.count(token)
self.Base.send[(self.iblock, token)] = self.outputs[token]
else:
msg = "@Task #%i(%s): not allowed to send out empty objects '%s'" % (
self.iblock + 1, self.Task, token)
warnings.warn(msg)
else:
msg = "@Task #%i(%s): not a valid output token '%s'" % (self.iblock + 1, self.Task, token)
raise NameError(msg)
def required(self, token, req=False):
"""
Tasks:
- check if input token is required
:param token: string, name of the input
:param req: Boolean, optional (default = False)
"""
if self.inputs[token].value is None:
if req:
msg = "@Task #%i(%s): The input '%s' is required." \
% (self.iblock + 1, self.Task, token)
raise IOError(msg)
def paramFROMinput(self):
for param in self.parameters:
# print(self.parameters[param])
# print(type(self.parameters[param]))
if isinstance(self.parameters[param], str):
if self.parameters[param][0]=='@':
token = self.parameters[param][1:].strip()
if token in self.inputs:
self.parameters[param] = self.inputs[token].value
else:
msg = "@Task #%i(%s): assigned an unknown token name - %s - to the parameter - %s - " \
% (self.iblock + 1, self.Task, token, param)
raise IOError(msg)
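# Note (added for clarity): paramFROMinput implements the '@token' convention of the
# workflow files, e.g. a parameter written as '@df' is replaced at run time by the
# object that arrived on the input token named 'df'.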
def set_value(self,token,value):
# print("token: ", token)
# print("type(value): ", type(value))
# print("value: ", value)
# print("output.types: ",self.outputs[token].types)
# print("We're here now!:", str(type(value)) in self.outputs[token].types)
self.outputs[token].fro = (self.iblock,self.Host,self.Function)
if str(type(value)) in self.outputs[token].types or \
len(self.outputs[token].types)==0:
self.outputs[token].value = value
else:
msg = "@Task #%i(%s): The output token '%s' doesn't support the type" % (
self.iblock, self.Host, token)
raise IOError(msg)
def import_sklearn(self):
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + str(err)  # exceptions have no .message attribute in Python 3
raise TypeError(msg)
return api
def Fit_sklearn(self):
self.paramFROMinput()
if 'track_header' in self.parameters:
self.header = self.parameters.pop('track_header')
else:
self.header = True
if 'func_method' in self.parameters:
self.method = self.parameters.pop('func_method')
else:
self.method = None
available_methods = self.metadata.WParameters.func_method.options
if self.method not in available_methods:
msg = "@Task #%i(%s): The method '%s' is not available for the function '%s'." % (
self.iblock, self.Task,self.method,self.Function)
raise NameError(msg)
# methods: fit, predict, None for regression
# methods: fit_transform, transform, inverse_transform, None for transformers
if self.method == None:
api = self.import_sklearn()
self.set_value('api', api)
elif self.method == 'fit_transform':
api = self.import_sklearn()
self.required('df', req=True)
df = self.inputs['df'].value
df = api.fit_transform(df)
self.set_value('api', api)
self.set_value('df', pd.DataFrame(df))
elif self.method == 'transform':
self.required('df', req=True)
self.required('api', req=True)
df = self.inputs['df'].value
api = self.inputs['api'].value
df = api.transform(df)
self.set_value('api', api)
self.set_value('df', | pd.DataFrame(df) | pandas.DataFrame |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert | to_numeric('XX', errors='ignore') | pandas.to_numeric |
import datetime
from unittest.mock import patch, Mock
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas._testing import assert_series_equal, assert_frame_equal
from src.features.feature_engineering import delete_irrelevant_columns, scale_features, scale_feature_in_df, \
create_is_night_flight_feature, format_hour, convert_time_into_datetime, change_hour_format, \
add_night_flight_binary_feature, add_delay_binary_target, extracting_time_features_from_date, \
handle_missing_values, check_weekend, add_categorical_delay_target
TESTED_MODULE = 'src.features.feature_engineering'
def test_delete_irrelevant_columns_doit_renvoyer_le_dataset_d_entrainement_sans_la_colonne_niveau_de_securite():
df = pd.DataFrame({"IDENTIFIANT": ["1", "2"], "NIVEAU DE SECURITE": ["10", "10"]})
new_df = delete_irrelevant_columns(df)
assert "NIVEAU DE SECURITE" not in new_df.columns
@patch(f'{TESTED_MODULE}.scale_feature_in_df')
def test_scale_features__apply_scaling_to_features(m_scaling_feature):
# Given
df = pd.DataFrame({'feature1': [1, 10], 'feature2': [2, 20]})
features_to_scale = ['feature1', 'feature2']
path = 'fake_path'
# When
scale_features(df, features_to_scale, path)
# Then
m_scaling_feature.assert_any_call(df, 'feature1', 'fake_path', True)
m_scaling_feature.assert_called_with(df, 'feature2', 'fake_path', True)
@patch(f'{TESTED_MODULE}.load_scaler')
@patch(f'{TESTED_MODULE}.StandardScaler.fit')
@patch(f'{TESTED_MODULE}.StandardScaler')
def test_scale_feature_in_df___load_model_if_already_trained(m_scaler, m_fit_scaler, m_load):
# Given
m_scaler = Mock()
df = pd.DataFrame({'feature1': [1, 10, 100], 'feature2': [2, 20, 200]})
feature = 'feature1'
path = 'fake_path'
# When
scale_feature_in_df(df, 'feature1', path, False)
# Then
m_load.assert_called()
@patch(f'{TESTED_MODULE}.save_scaler')
@patch(f'{TESTED_MODULE}.load_scaler')
@patch(f'{TESTED_MODULE}.StandardScaler.fit')
@patch(f'{TESTED_MODULE}.StandardScaler')
def test_scale_feature_in_df___save_model_if_trained_for_the_first_time(m_scaler, m_fit_scaler, m_load, m_save):
# Given
m_scaler = Mock()
df = pd.DataFrame({'feature1': [1, 10, 100], 'feature2': [2, 20, 200]})
feature = 'feature1'
path = 'fake_path'
# When
scale_feature_in_df(df, 'feature1', path, True)
# Then
m_save.assert_called()
def test_create_is_night_flight_feature__fill_with_0_or_1_if_night_flight_between_2300_and_600():
# Given
feature = 'DEPART PROGRAMME'
is_night_flight_feature = "DEPART DE NUIT"
df = pd.DataFrame({'DEPART PROGRAMME': [2345, 2249, 504, 1000]})
expected_df_feature = pd.Series([1, 0, 1, 0], name="DEPART DE NUIT")
# When
create_is_night_flight_feature(feature, is_night_flight_feature, df)
# Then
| assert_series_equal(df[is_night_flight_feature], expected_df_feature) | pandas._testing.assert_series_equal |
#! /usr/bin/env python
from unittest import TestCase
import pandas as pd
import numpy as np
from pandashells.lib.lomb_scargle_lib import (
_next_power_two,
_compute_pad,
_compute_params,
lomb_scargle,
)
class NextPowerTwoTest(TestCase):
def test_proper_return(self):
past_100 = _next_power_two(100)
past_1000 = _next_power_two(1000)
self.assertEqual(past_100, 128)
self.assertEqual(past_1000, 1024)
class ComputePadTest(TestCase):
def test_exp0(self):
t = np.linspace(0, 10, 101)
t_pad, y_pad = _compute_pad(t)
dt = np.diff(t_pad)[-1]
self.assertAlmostEqual(dt, 0.1)
self.assertEqual(len(t_pad) + len(t), 128)
self.assertEqual(set(y_pad), {0.})
def test_exp2(self):
t = np.linspace(0, 10, 101)
t_pad, y_pad = _compute_pad(t, interp_exponent=2)
dt = np.diff(t_pad)[-1]
self.assertAlmostEqual(dt, 0.1)
self.assertEqual(len(t_pad) + len(t), 512)
self.assertEqual(set(y_pad), {0.})
class ComputeParamsTest(TestCase):
def test_proper_return(self):
t = np.linspace(0, 10, 101)
min_freq, d_freq, N = _compute_params(t)
self.assertAlmostEqual(min_freq, .1)
self.assertAlmostEqual(d_freq, 0.049504950495)
self.assertAlmostEqual(N, 101)
class LombScargleTest(TestCase):
def test_no_pad(self):
t = np.linspace(0, 10, 256)
y = 7 * np.sin(2 * np.pi * t)
df_in = | pd.DataFrame({'t': t, 'y': y}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
MOVE_SPEED_THRESHOLD = 5
STOP_SPEED_THRESHOLD = 0.5
PREVIOUS_OBSERVATIONS_TIME_FRAME = 5 # store N minutes of observations
def filter_previous_observations_by_timestamp(df):
if len(df) > 0:
return df[lambda x: x['timestamp'] >= (df['timestamp'] - pd.Timedelta(15, unit='m'))]
else:
return df
def is_sudden_stop(d, prev):
sog_mean = prev['sog'].mean()
return sog_mean >= MOVE_SPEED_THRESHOLD and \
d['sog'] < STOP_SPEED_THRESHOLD and \
len(prev) > 1 and (prev['sudden_stopping'] == False).all()
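# Reading of the thresholds above (sog is the AIS speed over ground, typically in
# knots): a vessel whose stored recent track averaged at least MOVE_SPEED_THRESHOLD
# and that suddenly reports less than STOP_SPEED_THRESHOLD, with more than one prior
# observation and no stop already flagged, is marked as a sudden stop.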
def append_sudden_stopping(ais):
ais.assign(sudden_stopping = None)
vessels = {}
for i, d in ais.iterrows():
mmsi = d['mmsi']
if not mmsi in vessels.keys():
vessels[mmsi] = {}
vessels[mmsi]['previous_observations'] = | pd.DataFrame(columns=ais.columns) | pandas.DataFrame |
# coding=utf-8
import pandas as pd
import numpy as np
import re
from matplotlib.ticker import FuncFormatter
def number_formatter(number, pos=None):
"""Convert a number into a human readable format."""
magnitude = 0
while abs(number) >= 1000:
magnitude += 1
number /= 1000.0
return '%.1f%s' % (number, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude])
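# Illustrative helper (not in the original module): number_formatter(1532000) returns
# '1.5M', so it can be attached to a matplotlib axis as a tick formatter.
def _format_axis_ticks(ax):
    ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
    return ax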
def cuenta_tipo_de_dato(df,tipo):
"""
Esta función crea la tabla con información sobre la cantidad de cada tipo de dato encontrado en el csv
==========
* Args:
- df: el data frame al que se le va a realizar el conteo del tipo de dato.
- tipo: El nombre del tipo de dato que estamos buscando.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
# Para encontrar el tipo de dato numérico
>>conteo_nuericos = cuenta_tipo_de_dato(df, 'numerico')
# Para encontrar el tipo de dato texto
>>conteo_texto = cuenta_tipo_de_dato(df, 'object')
"""
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
if tipo == 'numerico':
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_tipo = cantidad_tipo + len(vars_type.loc[vars_type["tipo"] == "float64"])
else:
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == tipo])
return cantidad_tipo
def cuenta_nulos_por_columnas(df):
"""
Función que realiza una tabla con la cuenta de missing values por columna y obtiene la proporción que estos missing
values representan del total.
==========
* Args:
- df: el data frame al que se le va a realizar el conteo de los nulos por cada columna.
* Return:
- Data Frame: entrega el data frame que indica cuantos elementos nulos fueron encontrados en cada columna.
==========
Ejemplo:
>>faltates_por_columna = cuenta_nulos_por_columnas(df)
"""
valores_nulos = df.isnull().sum()
porcentaje_valores_nulos = 100 * df.isnull().sum() / len(df)
tabla_valores_nulos = pd.concat([valores_nulos, porcentaje_valores_nulos], axis=1)
tabla_valores_nulos_ordenada = tabla_valores_nulos.rename(
columns={0: 'Missing Values', 1: '% del Total'})
tabla_valores_nulos_ordenada = tabla_valores_nulos_ordenada[
tabla_valores_nulos_ordenada.iloc[:, 1] != 0].sort_values(
'% del Total', ascending=False).round(1)
print("El dataframe tiene " + str(df.shape[1]) + " columnas.\n"
"Hay " + str(tabla_valores_nulos_ordenada.shape[0]) +
" columnas que tienen NA's.")
return tabla_valores_nulos_ordenada
def CreaTablaConteoPorcentaje(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
df_resultado = df_resultado[nomColumna].map('{:,}'.format)
df_resultado = pd.DataFrame(data=df_resultado)
# compute the percentages
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def CreaTablaConteoPorcentaje_sin_stringformat(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
# compute the percentages
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def StringLowercase(df):
"""
Función cambiar todos los strings de un dataframe a lowercase
(columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringLowercase(df)
"""
### Columns
DataFrameColumns = df.columns
for col in DataFrameColumns:
df.rename(columns={col:col.lower()}, inplace=True)
### Observations
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].str.lower()
return df
def StringAcentos(df):
"""
Función para eliminar acentos, dieresis y eñes de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringAcentos(df)
"""
### Columns
df.columns = df.columns.str.replace('á', 'a')
df.columns = df.columns.str.replace('é', 'e')
df.columns = df.columns.str.replace('í', 'i')
df.columns = df.columns.str.replace('ó', 'o')
df.columns = df.columns.str.replace('ú', 'u')
df.columns = df.columns.str.replace('ü', 'u')
df.columns = df.columns.str.replace('ñ', 'n')
### Observations
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
return df
def StringStrip(df):
"""
Función para eliminar espacios al inicio y al final de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringStrip(df)
"""
### Columns
df.columns = [col.strip() for col in df.columns]
### Observations
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].apply(lambda x: x.strip() if isinstance(x, str) else x)
return df
def StringEspacios(df):
"""
Función para eliminar espacios dobles (o mas) de los strings de un
dataframe (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = StringEspacios(df)
"""
### Columns
df.columns = [re.sub(' +', ' ', col) for col in df.columns]
### Observations
filtro = df.dtypes == np.object
objects = df.dtypes[filtro]
StringColumns = list(objects.index)
for col in StringColumns:
if col != 'geometry':
df[col] = df[col].apply(lambda x: re.sub(' +', ' ', x) if isinstance(x, str) else x)
return df
def EstandarizaFormato(df):
"""
Función para estandarizar un dataframe: minúsculas, sin espacios en blanco,
sin signos de puntuación (columnas y observaciones)
==========
* Args:
- df: dataframe al que se desea hacer la modificación.
* Return:
- df: dataframe modificado
==========
Ejemplo:
>>df = EstandarizaFormato(df)
"""
### Lowercase
df = StringLowercase(df)
### Accents
df = StringAcentos(df)
### Strip leading and trailing whitespace
df = StringStrip(df)
### Collapse repeated spaces
df = StringEspacios(df)
### Replace spaces in column names with underscores
df.columns = df.columns.str.replace(' ', '_')
return df
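# Quick illustration (hypothetical toy data, added for clarity): the combined pipeline
# lower-cases, strips accents and extra whitespace, and replaces spaces in column
# names, so a column 'Número Único ' becomes 'numero_unico' and a value 'Árbol  Alto'
# becomes 'arbol alto'.
def _ejemplo_estandariza_formato():
    toy = pd.DataFrame({'Número Único ': ['Árbol  Alto']})
    return EstandarizaFormato(toy)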
def prepara_dataset(df):
"""
Esta función hace las correcciones al dataset.
==========
* Args:
- df: el data frame al que se le van a hacer las correcciones.
* Return:
- Data Frame: entrega el data frame corregido.
==========
Ejemplo:
# Para encontrar el tipo de dato numérico
>>df = prepara_dataset(df)
"""
# Standardize the format
df = EstandarizaFormato(df)
# Cast the variable types
df = df.astype({"año_hechos":'category', "mes_hechos":'category', "delito":'category', "categoria_delito":'category',"fiscalia":'category', "agencia":'category'})
# Split the geo_point column
new = df['geo_point'].str.split(",", n = 1, expand = True)
df["latitud"]= new[0]
df["longitud"]= new[1]
# Cast latitude and longitude to float
df = df.astype({"latitud":'float64', "longitud":'float64'})
# Drop the geo_point column
#df.drop(columns =["geo_point"], inplace = True)
# Drop the geo_shape column
#df.drop(columns =["geo_shape"], inplace = True)
return df
def genera_profiling_de_numericos(df,lista_numericas,vars_type):
"""
Función que genera un perfilamiento para los datos numéricos.
==========
* Args:
- df: el data frame al que se le va a realizar el perfilamiento para variables numéricas.
- lista_numericas: una lista con el nombre de las variables que son de tipo numérico.
- vars_type: tabla generada por la función cuenta_tipo_de_dato de este mismo script.
* Return:
- Data Frame: Data Frame con el perfilamiento para las variables numéricas.
==========
Ejemplo:
>>vars_type = cuenta_tipo_de_dato(df)
# Extraemos el nombre de las variables numericas:
>>variables_int = vars_type.loc[vars_type["tipo"] == "int64"]
>>variables_float = vars_type.loc[vars_type["tipo"] == "float64"]
>>variables_numericas = variables_int.append(variables_float, ignore_index=True)
>>lista_numericas = list(variables_numericas['variable'])
# Generamos el perfilamiento para esas variables
>>perfilamiento_de_numericas = genera_profiling_de_numericos(df,lista_numericas,vars_type)
"""
# Get the descriptive statistics of the column if it is numeric
lista_perfilamiento_numerico = ['Tipo','Número de observaciones', 'Media', 'Desviación estándar',
'Cuartil 25%','Cuartil 50%','Cuartil 75%','Mínimo','Máximo',
'Número de observaciones únicas','Número de faltantes','Top1/veces/%',
'Top2/veces/%','Top3/veces/%'
,'Top4/veces/%','Top5/veces/%']
datos_dataframe_profiling_numericas = {'Métrica':lista_perfilamiento_numerico}
dataframe_profiling_numericas = pd.DataFrame(data=datos_dataframe_profiling_numericas)
for col in lista_numericas:
# data type
vars_type_num = pd.DataFrame(vars_type)
#vars_type_num
df_tipo = pd.DataFrame(data=vars_type_num.loc[vars_type_num["variable"] == col])
tipo_dato=df_tipo['tipo'][0]
#print(tipo_dato)
# Extract the relevant metrics
descr_col = df[col].describe()
descr_col = pd.DataFrame(descr_col)
descr_col['Métrica']=descr_col.index
descr_col.columns=['valor','Métrica']
# number of observations
medida = 'count'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
num_observaciones_num = metrica['valor'][0]
#print(num_observaciones_num)
# mean
medida = 'mean'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
media_obs_num = metrica['valor'][0]
media_obs_num = media_obs_num.round(2)
#print(media_obs_num)
# standard deviation
medida = 'std'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
sd_obs_num = metrica['valor'][0]
sd_obs_num = sd_obs_num.round(2)
#print(sd_obs_num)
# 25th percentile
medida = '25%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_25_obs_num = metrica['valor'][0]
cuant_25_obs_num = cuant_25_obs_num.round(2)
#print(cuant_25_obs_num)
# 50th percentile (median)
medida = '50%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_50_obs_num = metrica['valor'][0]
cuant_50_obs_num = cuant_50_obs_num.round(2)
#print(cuant_50_obs_num)
#cuant_50_obs_num = agua.quantile(q=0.25)
#print(cuant_50_obs_num)
# 75th percentile
medida = '75%'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
cuant_75_obs_num = metrica['valor'][0]
cuant_75_obs_num = cuant_75_obs_num.round(2)
#print(cuant_75_obs_num)
#cuant_75_obs_num = agua.quantile(q=0.25)
#print(cuant_75_obs_num)
# minimum
medida = 'min'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
minimo_obs_num = metrica['valor'][0]
minimo_obs_num = minimo_obs_num.round(2)
#print(minimo_obs_num)
# maximum
medida = 'max'
metrica = descr_col.loc[descr_col["Métrica"] == medida]
maximo_obs_num = metrica['valor'][0]
maximo_obs_num = maximo_obs_num.round(2)
#print(maximo_obs_num)
# number of unique observations
num_obs_unicas_obs_num = df[col].nunique()
#print(num_obs_unicas_obs_num)
# Number of observations with missing values
obs_faltantes_obs_num = df[col].isna().sum()
# top 5 repeated observations
# df_resultado = df[col].value_counts(dropna=True)
# df_resultado = pd.DataFrame(df_resultado)
# df_resultado.columns=['conteo_top_5']
# df_resultado=df_resultado.sort_values('conteo_top_5', ascending = False)
#top5 = df_resultado.head(5)
#print(top5)
# build the table for the modes
tabla_importantes = CreaTablaConteoPorcentaje(df,str(col),True)
tabla_importantes.columns = ['conteo','porcentaje']
top1 = tabla_importantes.index[0]
veces1 = list(tabla_importantes['conteo'])[0]
porcentaje1 = list(tabla_importantes['porcentaje'])[0]
datos_top1 = [top1,veces1,porcentaje1]
# #datos_top1 = list([tabla_importantes[0:1]])
# lista_perfilamiento_numerico = ['tipo','numero de observaciones', 'media', 'desviacion estándar',
# 'cuartil 25%','cuartil 50%','cuartil 75%','minimo','maximo',
# 'numero de observaciones unicas','top1/veces/porcentaje']
# datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
# cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
# maximo_obs_num,num_obs_unicas_obs_num,datos_top1]
        # Top 2 to top 5: pad with 'N/A' when the variable has fewer distinct values
        datos_tops = []
        for i in range(1, 5):
            if(len(tabla_importantes) > i):
                datos_tops.append([tabla_importantes.index[i],
                                   list(tabla_importantes['conteo'])[i],
                                   list(tabla_importantes['porcentaje'])[i]])
            else:
                datos_tops.append(['N/A', 'N/A', 'N/A'])
        datos_top2, datos_top3, datos_top4, datos_top5 = datos_tops
#print(obs_faltantes_obs_num)
datos_variable = [tipo_dato,num_observaciones_num,media_obs_num,sd_obs_num,
cuant_25_obs_num, cuant_50_obs_num,cuant_75_obs_num,minimo_obs_num,
maximo_obs_num,num_obs_unicas_obs_num,obs_faltantes_obs_num,datos_top1,datos_top2,datos_top3,
datos_top4,datos_top5]
# datos_dataframe_profiling_numericas = {'metrica':lista_perfilamiento_numerico}
# dataframe_profiling_numericas = pd.DataFrame(data=datos_dataframe_profiling_numericas)
dataframe_profiling_numericas[col]=datos_variable
return dataframe_profiling_numericas
def genera_profiling_general(df):
"""
    Generates the table with a general profile of the data set, without going into per-variable detail.
    ==========
    * Args:
         - df: data frame to be profiled.
    * Return:
         - Data Frame: data frame containing the general profile of the data set.
    ==========
    Example:
        >>perfilamiento_general = genera_profiling_general(df)
"""
cuenta_de_variables = len(df.columns)
cuenta_observaciones = len(df)
total_celdas = cuenta_de_variables*cuenta_observaciones
# Contamos el tipo de datos del dataset
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
# Asignamos un valor para cada tipo
## Numéricas
cantidad_numericas = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_numericas = cantidad_numericas + len(vars_type.loc[vars_type["tipo"] == "float64"])
#print(cantidad_numericas)
## Fechas
cantidad_fecha = len(vars_type.loc[vars_type["tipo"] == "datetime64[ns]"])
#print(cantidad_fecha)
## Categoricas
cantidad_categoricas = len(vars_type.loc[vars_type["tipo"] == "category"])
#print(cantidad_categoricas)
## Texto
cantidad_texto = len(vars_type.loc[vars_type["tipo"] == "object"])
#print(cantidad_texto)
# Contamos los faltantes
nulos_totales = cuenta_nulos_por_columnas(df)['Missing Values'].sum()
#print(nulos_totales)
# Obtenemos el porcentaje de datos que son faltantes
nulos_porcentaje = ((nulos_totales/(total_celdas))*100).round(1).astype(str)+'%'
#print(nulos_porcentaje)
# Obtenemos el total de columnas duplicadas
ds_duplicados = df.duplicated(subset=None, keep='first')
ds_duplicados = pd.DataFrame(ds_duplicados,columns = ['duplicated'])
numero_de_duplicados = len(ds_duplicados.loc[ds_duplicados["duplicated"] == True])
#print(numero_de_duplicados)
# Obtenemos el porcentaje de duplicados
    porcentaje_de_duplicados = str(round((numero_de_duplicados / cuenta_observaciones) * 100, 1)) + '%'
#print(porcentaje_de_duplicados)
estadisticas = ['Total de variables','Conteo de observaciones','Total de celdas',
'Cantidad de variables numéricas','Cantidad de variables de fecha',
'Cantidad de variables categóricas', 'Cantidad de variables de texto',
'Valores faltantes','Porcentaje de valores faltantes',
'Renglones duplicados', 'Porcentaje de valores duplicados']
valores_estadisticas = [cuenta_de_variables,cuenta_observaciones,total_celdas,cantidad_numericas,
cantidad_fecha,cantidad_categoricas,cantidad_texto,nulos_totales,nulos_porcentaje,
numero_de_duplicados,porcentaje_de_duplicados]
valores = {'Estadísticas':estadisticas,'Resultado':valores_estadisticas}
df_perfilamiento_general = pd.DataFrame(data=valores)
return df_perfilamiento_general
def genera_profiling_de_categorias(df, lista_category,vars_type):
"""
    Generates a profile for the categorical variables.
    ==========
    * Args:
        - df: data frame whose categorical variables will be profiled.
        - lista_category: list with the names of the variables of categorical type.
        - vars_type: table generated by the cuenta_tipo_de_dato function in this same script.
    * Return:
        - Data Frame: data frame with the profile of the categorical variables.
    ==========
    Example:
        >>vars_type = cuenta_tipo_de_dato(df)
        # Extract the names of the categorical variables:
        >>variables_category = vars_type.loc[vars_type["tipo"] == "category"]
        >>lista_category = list(variables_category['variable'])
        # Generate the profile for those variables
        >>profiling_de_categorias = genera_profiling_de_categorias(df,lista_category,vars_type)
"""
# Obtenemos los estadísticos de la columna si es catagorica
lista_perfilamiento_categorico = ['Tipo','Número de categorías', 'Número de observaciones',
'Observaciones nulas','% Observaciones nulas', 'Valores únicos',
'Moda1/veces/%','Moda2/veces/%','Moda3/veces/%']
datos_dataframe_profiling_categoricos = {'Métrica':lista_perfilamiento_categorico}
dataframe_profiling_categoricas = pd.DataFrame(data=datos_dataframe_profiling_categoricos)
for col in lista_category:
#Tipo de dato
vars_type_cat = pd.DataFrame(vars_type)
#vars_type_cat
df_tipo = pd.DataFrame(data=vars_type_cat.loc[vars_type_cat["variable"] == col])
tipo_dato=df_tipo['tipo'][0]
#Obtenemos las métricas relevantes
descr_col = df[col]
descr_col = pd.DataFrame(descr_col)
descr_col['metrica']=descr_col.index
descr_col.columns=['valor','Métrica']
#Numero de categorias
num_categorias=descr_col.nunique()["valor"]
#Numero de observaciones
num_observaciones=len(descr_col)
#Valores nulos
num_obs_nulas=df[col].isna().sum()
#%Valores nulos
por_obs_nulas=num_obs_nulas/num_observaciones
#Valor de las categorias
valores_unicos = list(df[col].unique())
#Generamos tabla para las modas
tabla_importantes = CreaTablaConteoPorcentaje(df,str(col),True)
tabla_importantes.columns = ['conteo','porcentaje']
        # Top-3 modes; pad with 'N/A' when the variable has fewer than 3 distinct categories
        datos_modas = []
        for i in range(3):
            if(len(tabla_importantes) > i):
                datos_modas.append([tabla_importantes.index[i],
                                    list(tabla_importantes['conteo'])[i],
                                    list(tabla_importantes['porcentaje'])[i]])
            else:
                datos_modas.append(['N/A', 'N/A', 'N/A'])
        datos_moda1, datos_moda2, datos_moda3 = datos_modas
datos_variable = [tipo_dato,num_categorias,num_observaciones,num_obs_nulas,por_obs_nulas,
valores_unicos,datos_moda1,datos_moda2,datos_moda3]
dataframe_profiling_categoricas[col]=datos_variable
return dataframe_profiling_categoricas
def genera_profiling_de_texto(df,lista_texto,vars_type):
"""
    Generates a profile for the text (object) variables.
    ==========
    * Args:
        - df: data frame whose text variables will be profiled.
        - lista_texto: list with the names of the variables of text type (object).
        - vars_type: table generated by the cuenta_tipo_de_dato function in this same script.
    * Return:
        - Data Frame: data frame with the profile of the text variables.
    ==========
    Example:
        >>vars_type = cuenta_tipo_de_dato(df)
        # Extract the names of the text variables:
        >>variables_texto = vars_type.loc[vars_type["tipo"] == "object"]
        >>lista_texto = list(variables_texto['variable'])
        # Generate the profile for those variables
        >>profiling_de_texto = genera_profiling_de_texto(df,lista_texto,vars_type)
"""
# Obtenemos los estadísticos de la columna si es catagorica
lista_perfilamiento_txt = ['Tipo','Número de observaciones', 'Observaciones únicas', '% Observaciones únicas',
'Observaciones nulas', '% Observaciones nulas', 'Tamaño promedio','Tamaño mínimo','Tamaño máximo']
datos_dataframe_profiling_txt = {'Métrica':lista_perfilamiento_txt}
    dataframe_profiling_txt = pd.DataFrame(data=datos_dataframe_profiling_txt)
import logging
import pandas as pd
import glob
import os
import sys
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import requests
from zipfile import ZipFile
import urllib
import numpy as np
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# using preexisting table for this dataset
dataset_name = 'foo_062_rw0_fishery_production' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
'''
# insert the url used to download the data from the source website
url_list = ['http://www.fao.org/fishery/static/Data/GlobalProduction_2021.1.2.zip', 'http://www.fao.org/fishery/static/Data/Aquaculture_2021.1.2.zip', 'http://www.fao.org/fishery/static/Data/Capture_2021.1.2.zip'] #check
# construct the file paths to raw data files
raw_data_file = [os.path.join(data_dir,os.path.basename(url)) for url in url_list]
raw_data_file_unzipped = [file.split('.')[0] for file in raw_data_file]
for url, file in zip(url_list, raw_data_file):
# download the data from the source
r = requests.get(url)
with open(file, 'wb') as f:
f.write(r.content)
for file, unzipped in zip(raw_data_file, raw_data_file_unzipped):
# unzip source data
zip_ref = ZipFile(file, 'r')
zip_ref.extractall(unzipped)
zip_ref.close()
'''
Process the data
'''
# create a list to store the processed dataframes
processed_df = []
for file in raw_data_file_unzipped:
# read the dataset as a pandas dataframe
csv_data = glob.glob(os.path.join(file,'*QUANTITY.csv'))[0]
df_data = pd.read_csv(csv_data,encoding='latin-1')
# read the country code list as a pandas dataframe
csv_countries = glob.glob(os.path.join(file,'*COUNTRY_GROUPS.csv'))[0]
countries_df = pd.read_csv(csv_countries, encoding='latin-1')
# rename the UN Code column in the country code list to match the column in the dataset
countries_df.rename(columns={'UN_Code':'COUNTRY.UN_CODE'}, inplace=True)
# merge the dataframes so each country code in the dataset is matched with an ISO code and its full name
df = pd.merge(df_data,countries_df[['COUNTRY.UN_CODE','ISO3_Code','Name_En']], on='COUNTRY.UN_CODE', how='left')
# add a column to reflect the type of production measured by the value column for the dataset (ex GlobalProduction, Aquaculture, or Capture) and the variable (quantity)
type = os.path.basename(file).split('_')[0]
df['type'] = type + '_quantity'
# convert the data type of the value column to float
df['VALUE'] = df['VALUE'].astype(float)
# add the processed dataframe to the list
processed_df.append(df)
# There is additional data in the Aquaculture dataset on value
# Process this data following the procedure above
if type == 'Aquaculture':
# read the dataset as a pandas dataframe
csv_aqua_value = glob.glob(os.path.join(file,'*VALUE.csv'))[0]
df_aqua_value = pd.read_csv(csv_aqua_value,encoding='latin-1')
# merge the dataframes so each country code in the dataset is matched with an ISO code and its full name
df = pd.merge(df_aqua_value,countries_df[['COUNTRY.UN_CODE','ISO3_Code','Name_En']], on='COUNTRY.UN_CODE', how='left')
# add a column to reflect the type of production measured by the value column for the dataset (ex GlobalProduction, Aquaculture, or Capture) and the variable (value)
type = os.path.basename(file).split('_')[0]
df['type'] = type + '_value'
# convert the data type of the value column to float
df['VALUE'] = df['VALUE'].astype(float)
# add the processed dataframe to the list
processed_df.append(df)
# join the three datasets
df = pd.concat(processed_df)
# rename the period column to year
df.rename(columns={'PERIOD':'year'}, inplace=True)
# pivot the table from long to wide form
# to sum the values for each type of production of a country in a given year
table = pd.pivot_table(df, values='VALUE', index=['ISO3_Code', 'year','MEASURE'], columns=['type'], aggfunc=np.sum)
# turn all column names to lowercase
table.columns = [x.lower() for x in table.columns]
# convert Year column to datetime object
df['datetime'] = pd.to_datetime(df.year, format='%Y')
import pandas as pd
from pandas_datareader import data
start_date = '2014-01-01'
end_date = '2018-01-01'
SRC_DATA_FILENAME = 'goog_data.pkl'
try:
goog_data2 = pd.read_pickle(SRC_DATA_FILENAME)
except FileNotFoundError:
goog_data2 = data.DataReader('GOOG', 'yahoo', start_date, end_date)
goog_data2.to_pickle(SRC_DATA_FILENAME)
goog_data = goog_data2.tail(620)
close = goog_data['Close']
'''
The Moving Average Convergence Divergence
(MACD) was developed by <NAME>, and is based on the differences
between two moving averages of different lengths, a Fast and a Slow moving
average. A second line, called the Signal line is plotted as a moving
average of the MACD. A third line, called the MACD Histogram is
optionally plotted as a histogram of the difference between the
MACD and the Signal Line.
MACD = FastMA - SlowMA
Where:
FastMA is the shorter moving average and SlowMA is the longer moving average.
SignalLine = MovAvg (MACD)
MACD Histogram = MACD - SignalLine
'''
num_periods_fast = 10 # fast EMA time period
K_fast = 2 / (num_periods_fast + 1) # fast EMA smoothing factor
ema_fast = 0
num_periods_slow = 40 # slow EMA time period
K_slow = 2 / (num_periods_slow + 1) # slow EMA smoothing factor
ema_slow = 0
num_periods_macd = 20 # MACD EMA time period
K_macd = 2 / (num_periods_macd + 1) # MACD EMA smoothing factor
ema_macd = 0
ema_fast_values = [] # track fast EMA values for visualization purposes
ema_slow_values = [] # track slow EMA values for visualization purposes
macd_values = [] # track MACD values for visualization purposes
macd_signal_values = [] # MACD EMA values tracker
macd_histogram_values = [] # MACD - MACD-EMA
for close_price in close:
if (ema_fast == 0): # first observation
ema_fast = close_price
ema_slow = close_price
else:
ema_fast = (close_price - ema_fast) * K_fast + ema_fast
ema_slow = (close_price - ema_slow) * K_slow + ema_slow
ema_fast_values.append(ema_fast)
ema_slow_values.append(ema_slow)
macd = ema_fast - ema_slow # MACD is fast_MA - slow_EMA
if ema_macd == 0:
ema_macd = macd
else:
ema_macd = (macd - ema_macd) * K_macd + ema_macd # signal is EMA of MACD values
macd_values.append(macd)
macd_signal_values.append(ema_macd)
    macd_histogram_values.append(macd - ema_macd)
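# Vectorized cross-check (added sketch, not part of the original strategy): with adjust=False,
# pandas' ewm applies the same recursive smoothing as the loop above, so these series should
# closely match the hand-rolled EMA/MACD values collected in the lists.
ema_fast_check = close.ewm(span=num_periods_fast, adjust=False).mean()
ema_slow_check = close.ewm(span=num_periods_slow, adjust=False).mean()
macd_check = ema_fast_check - ema_slow_check
macd_signal_check = macd_check.ewm(span=num_periods_macd, adjust=False).mean()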
goog_data = goog_data.assign(ClosePrice=pd.Series(close, index=goog_data.index))
goog_data = goog_data.assign(FastExponential10DayMovingAverage=pd.Series(ema_fast_values, index=goog_data.index))
goog_data = goog_data.assign(SlowExponential40DayMovingAverage=pd.Series(ema_slow_values, index=goog_data.index))
goog_data = goog_data.assign(MovingAverageConvergenceDivergence=pd.Series(macd_values, index=goog_data.index))
goog_data = goog_data.assign(Exponential20DayMovingAverageOfMACD=pd.Series(macd_signal_values, index=goog_data.index))
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/2 15:58
Desc: East Money (Eastmoney) - stock - financial analysis
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_balance_sheet_by_report_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    East Money - stock - financial analysis - balance sheet - by reporting period
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market prefix, e.g. "SH600519"
    :type symbol: str
    :return: balance sheet by reporting period
    :rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbDateAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i: i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
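# Usage sketch (illustrative): fetch the balance sheet, by reporting period, for symbol SH600519.
#   df = stock_balance_sheet_by_report_em(symbol="SH600519")
#   print(df.head())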
def stock_balance_sheet_by_yearly_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    East Money - stock - financial analysis - balance sheet - by year
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market prefix, e.g. "SH600519"
    :type symbol: str
    :return: balance sheet by year
    :rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbDateAjaxNew"
params = {
"companyType": "4",
"reportDateType": "1",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i: i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbAjaxNew"
params = {
"companyType": "4",
"reportDateType": "1",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_profit_sheet_by_report_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    East Money - stock - financial analysis - income statement - by reporting period
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market prefix, e.g. "SH600519"
    :type symbol: str
    :return: income statement by reporting period
    :rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbDateAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i: i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"reportType": "1",
"code": symbol,
"dates": item,
}
r = requests.get(url, params=params)
data_json = r.json()
        temp_df = pd.DataFrame(data_json["data"])
        big_df = pd.concat([big_df, temp_df], ignore_index=True)
    return big_df
import itertools
import json
import os
import gym
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import envs
CIFAR10_CLASSES = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
def figsize_third(scale, height_ratio=1.0):
fig_width_pt = 156 # Get this from LaTeX using \the\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
def figsize_column(scale, height_ratio=1.0):
fig_width_pt = 234 # Get this from LaTeX using \the\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
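# Worked example: with the 234 pt column width above, figsize_column(1.0) returns roughly
# [3.24, 2.00] inches (234 / 72.27 ~= 3.24 wide, height = width * golden_mean ~= 0.618 ratio).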
def figsize_text(scale, height_ratio=1.0):
fig_width_pt = 468 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * golden_mean * height_ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
pgf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or luatex
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 8,
"font.size": 8,
"legend.fontsize": 8,
"xtick.labelsize": 8,
"ytick.labelsize": 8,
"figure.figsize": figsize_column(1.0),
"legend.framealpha": 1.0,
"text.latex.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
}
matplotlib.rcParams.update(pgf_with_latex)
BASICNAMES = {
"blur": "Blur",
"fliplr": "Flip L/R",
"flipud": "Flip U/D",
"grayscale": "Grayscale",
"invert": "Invert",
"rotation": "Rotation",
"shear": "Shear",
}
def read_files(environment, scenario, dataset, agent, logbasedir):
summary = {}
stats = {}
# for ag in [agent]:
expdir = "exp_{environment}_{scenario}_{dataset}_{agent}".format(
environment=environment, scenario=scenario, dataset=dataset, agent=agent
)
logdir = os.path.join(logbasedir, expdir)
df_summary, df_stats = group_logs(logdir)
df_summary[["rel_failure", "original_accuracy", "modified_accuracy"]] *= 100
summary[agent] = df_summary
stats[agent] = df_stats
bl = get_baseline(environment, scenario, dataset)
return summary, stats, bl
def evaluate_classification(scenario, dataset, agent, logbasedir, outdir):
SUM_NEW_COLS = {
"rel_failure": "Failure Rate",
"original_accuracy": "Accuracy (Orig.)",
"modified_accuracy": "Accuracy (MR)",
"failure_baseline": "Failure Rate (Baseline)",
"modified_accuracy_baseline": "Accuracy (MR, Baseline)",
}
plot_charts(
"classification", scenario, dataset, agent, logbasedir, outdir, SUM_NEW_COLS
)
def evaluate_detection(scenario, dataset, agent, logbasedir, outdir):
SUM_NEW_COLS = {
"rel_failure": "Failure Rate",
"original_accuracy": "mAP (Orig.)",
"modified_accuracy": "mAP (MR)",
"failure_baseline": "Failure Rate (Baseline)",
"modified_accuracy_baseline": "mAP (MR, Baseline)",
}
plot_charts("detection", scenario, dataset, agent, logbasedir, outdir, SUM_NEW_COLS)
def plot_charts(
environment, scenario, dataset, agent, logbasedir, outdir, summary_columns
):
summary, stats, bl = read_files(environment, scenario, dataset, agent, logbasedir)
print(environment, scenario, dataset, agent)
print("avg. duration per iteration: ", summary[agent].duration.mean())
#print("rel_failure: ", summary[agent].rel_failure.last())
print("orig. accuracy: ", summary[agent].original_accuracy.mean())
print("mod. accuracy: ", summary[agent].modified_accuracy.mean())
plot_progress(
environment, dataset, scenario, agent, summary[agent], bl, outdir, summary_columns
)
if scenario == "basic":
plot_basic_action_distribution(
environment, dataset, scenario, agent, stats[agent], bl, outdir
)
elif scenario == "rotation":
plot_parametrized_action_distribution(
environment, dataset, scenario, "rotation", agent, stats[agent], bl, outdir
)
elif scenario == "shear":
plot_parametrized_action_distribution(
environment, dataset, scenario, "shear", agent, stats[agent], bl, outdir
)
else:
rot_actions = stats[agent].action.str.match(r"rot[-\d]+")
shear_actions = stats[agent].action.str.match(r"shear[-\d]+")
plot_basic_action_distribution(
environment,
dataset,
scenario,
agent,
stats[agent][(~rot_actions) & (~shear_actions)],
bl,
outdir,
)
plot_parametrized_action_distribution(
environment,
dataset,
scenario,
"rotation",
agent,
stats[agent][rot_actions],
bl,
outdir,
)
plot_parametrized_action_distribution(
environment,
dataset,
scenario,
"shear",
agent,
stats[agent][shear_actions],
bl,
outdir,
)
def plot_progress(environment, dataset, scenario, agent, df_summary, bl, outdir, colnames):
df_summary["failure_baseline"] = bl["failure"].mean() * 100
df_summary["modified_accuracy_baseline"] = bl["modified_accuracy"].mean() * 100
#colors = sns.color_palette("colorblind", n_colors=3)
colors = ["#9b59b6", "#3498db", "#e74c3c"]
c1 = colors[0]
c2 = colors[1]
c3 = colors[2]
ax = df_summary.rename(columns=colnames).plot(
x="iteration",
y=[
colnames["original_accuracy"],
colnames["modified_accuracy"],
colnames["rel_failure"],
colnames["modified_accuracy_baseline"],
colnames["failure_baseline"],
],
style=["-", "-", "-", "--", "--"],
color=[c1, c2, c3, c2, c3],
figsize=figsize_column(1.1, height_ratio=0.7),
)
ax.grid()
ax.set_xlabel("Iteration")
ax.set_xlim([0, df_summary.iteration.max()])
ax.set_ylim([0, 100])
# if environment == 'classification':
# ax.set_ylim([25, 100])
plt.locator_params(axis="y", nbins=7)
hand, labl = ax.get_legend_handles_labels()
ax.legend(
hand[:3], labl[:3], ncol=2, loc="upper center", bbox_to_anchor=(0.5, 1.35), fancybox=False
)
sns.despine()
plt.savefig(
os.path.join(
outdir, "{}-{}-{}-{}-process.pgf".format(environment, dataset, scenario, agent)
),
dpi=500,
bbox_inches="tight",
pad_inches=0,
)
def plot_basic_action_distribution(
environment, dataset, scenario, agent, df_stats, bl, outdir
):
bl["action"].replace(BASICNAMES, inplace=True)
df_stats["action"].replace(BASICNAMES, inplace=True)
df_final = df_stats.loc[
df_stats.iteration == df_stats.iteration.max(), ["action", "rel_failure"]
]
df_final.set_index("action", inplace=True)
combdf = df_final.join(bl[["action", "failure"]].groupby("action").mean())
combdf.rename(
columns={"failure": "Baseline", "rel_failure": "Tetraband"}, inplace=True
)
combdf *= 100
combdf.sort_values(["Tetraband"], inplace=True)
palette = sns.color_palette("colorblind", 4)
if environment == "detection":
palette = palette[2:]
sns.set_palette(palette)
combdf.round(2).to_latex(
open(
os.path.join(
outdir,
"{}-{}-{}-main-{}-actions.tex".format(
environment, dataset, scenario, agent
),
),
"w",
)
)
ax = combdf.plot.bar(
y=["Tetraband", "Baseline"], figsize=figsize_text(1.1, height_ratio=0.3),
width=0.85,
edgecolor='white'
)
bars = ax.patches
hatches = ''.join(h * len(combdf) for h in '/.')
for bar, hatch in zip(bars, hatches):
bar.set_hatch(hatch)
labels = ax.get_xticklabels()
ax.grid(axis="y")
ax.set_axisbelow(True)
ax.set_xticklabels(labels, rotation=0)
ax.set_xlabel("")
ax.set_ylabel("Failure Rate (in %)")
ax.legend(loc="upper left")
plt.locator_params(axis="y", nbins=7)
sns.despine()
plt.savefig(
"{}-{}-{}-main-{}-actions.pdf".format(environment, dataset, scenario, agent),
dpi=500,
bbox_inches="tight",
pad_inches=0,
)
plt.savefig(
os.path.join(
outdir,
"{}-{}-{}-main-{}-actions.pgf".format(
environment, dataset, scenario, agent
),
),
dpi=500,
bbox_inches="tight",
pad_inches=0,
)
plt.close()
def plot_parametrized_action_distribution(
environment, dataset, scenario, action_name, agent, df_stats, bl, outdir
):
df_final = df_stats.loc[
df_stats.iteration == df_stats.iteration.max(), ["action", "rel_failure"]
]
df_final.set_index("action", inplace=True)
combdf = df_final.join(bl[["parameter", "failure"]].groupby("parameter").mean())
combdf.rename(
columns={"failure": "Baseline", "rel_failure": "Tetraband"}, inplace=True
)
combdf *= 100
combdf["deg"] = combdf.index.str.replace(r"[a-z]+", "").map(int)
combdf = combdf.append(pd.Series({"deg": 0, "Baseline": 0, "Tetraband": 0}), ignore_index=True)
combdf.sort_values(["deg"], inplace=True)
combdf.set_index("deg", inplace=True)
combdf.round(2).to_latex(
open(
os.path.join(
outdir,
"{}-{}-{}-{}-{}-actions.tex".format(
environment, dataset, scenario, action_name, agent
),
),
"w",
)
)
palette = sns.color_palette("colorblind", 4)
# palette = sns.color_palette("Paired", 4)
if environment == "classification":
palette = palette[:2]
else:
palette = palette[2:]
sns.set_palette(palette)
fig, ax = plt.subplots(figsize=figsize_column(1.1, height_ratio=0.75))
combdf["Tetraband"].plot.line(linestyle='-', ax=ax)
combdf["Baseline"].plot.line(linestyle='--', ax=ax)
# ax = combdf.plot.bar(
# y=["Tetraband", "Baseline"],
# # edgecolor='white',
# #width=0.9 if action_name == "rotation" else 0.8,
# figsize=figsize_text(1.1, height_ratio=0.3),
# )
if action_name == "rotation":
ax.set_ylabel("Failure Rate (in \%)")
ax.xaxis.set_major_locator(MultipleLocator(20))
else:
ax.xaxis.set_major_locator(MultipleLocator(10))
ax.legend(fancybox=False, loc="upper center", ncol=1)
ax.xaxis.set_minor_locator(MultipleLocator(5))
ax.axvline(0, linewidth=1, linestyle="--", c="k")
ax.grid(axis="y", which='both')
ax.set_xlabel("")
ax.yaxis.set_minor_locator(MultipleLocator(10))
if environment == "classification":
ax.set_ylim([0, 80])
else:
ax.set_ylim([50, 100])
plt.locator_params(axis="y", nbins=5)
sns.despine()
plt.savefig(
"{}-{}-{}-{}-{}-actions-wide.pdf".format(
environment, dataset, scenario, action_name, agent
),
dpi=500,
bbox_inches="tight",
pad_inches=0,
)
plt.savefig(
os.path.join(
outdir,
"{}-{}-{}-{}-{}-actions-wide.pgf".format(
environment, dataset, scenario, action_name, agent
),
),
dpi=500,
bbox_inches="tight",
pad_inches=0,
)
plt.close()
def get_action_names(basename, scenario, dataset):
env_name = "{basename}-{scenario}-{dataset}-v0".format(
basename=basename, scenario=scenario, dataset=dataset
)
env = gym.make(env_name)
action_names = env.action_names()
return action_names
def get_baseline(envname, scenario, dataset):
filename = "baseline_{}_{}_{}.csv".format(envname, scenario, dataset)
df = pd.read_csv(os.path.join("logs", filename), sep=";")
if envname == "classification":
df["original_accuracy"] = df["original"] == df["label"]
df["modified_accuracy"] = df["prediction"] == df["label"]
df["failure"] = df["original"] != df["prediction"]
else:
df["original_accuracy"] = df["original_score"]
df["modified_accuracy"] = df["modified_score"]
# Success was defined as a passing test case, but in the evaluation we see it differently
df["failure"] = ~df["success"]
if scenario in ("rotation", "shear"):
df["parameter"] = df["action"]
df["action"] = scenario
df["action_orig"] = df["action"]
for actidx, actname in enumerate(
get_action_names(envs.BASENAMES[envname], scenario, dataset)
):
df.loc[df.action == actidx, "action"] = actname
return df
def load_log(logfile):
summary = []
action_stats = []
for l in open(logfile, "r"):
rowdict = json.loads(l)
# rowdict = yaml.safe_load(l)
summary.append({k: v for k, v in rowdict.items() if k != "statistics"})
for act in rowdict["statistics"].values():
act["iteration"] = rowdict["iteration"]
action_stats.append(act)
# print(rowdict)
df_summary = pd.DataFrame.from_records(summary)
df_summary.rename(columns={"success": "failure"}, inplace=True)
df_summary["rel_failure"] = df_summary["failure"] / df_summary["iteration"]
    df_stats = pd.DataFrame.from_records(action_stats)
from time import time
from typing import Tuple, Mapping, Optional, Sequence, TYPE_CHECKING
from itertools import product
import sys
import pytest
from scanpy import settings as s
from anndata import AnnData
from scanpy.datasets import blobs
import scanpy as sc
from pandas.testing import assert_frame_equal
import numpy as np
import pandas as pd
from squidpy.gr import ligrec
from squidpy.gr._ligrec import PermutationTest
from squidpy._constants._pkg_constants import Key
_CK = "leiden"
Interactions_t = Tuple[Sequence[str], Sequence[str]]
Complexes_t = Sequence[Tuple[str, str]]
class TestInvalidBehavior:
def test_not_adata(self):
with pytest.raises(TypeError, match=r"Expected `adata` to be of type `anndata.AnnData`"):
ligrec(None, _CK)
def test_adata_no_raw(self, adata: AnnData):
del adata.raw
with pytest.raises(AttributeError, match=r"No `.raw` attribute"):
ligrec(adata, _CK, use_raw=True)
def test_raw_has_different_n_obs(self, adata: AnnData):
adata.raw = blobs(n_observations=adata.n_obs + 1)
with pytest.raises(ValueError, match=rf"Expected `{adata.n_obs}` cells in `.raw`"):
ligrec(adata, _CK)
def test_invalid_cluster_key(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(KeyError, match=r"Cluster key `foobar` not found"):
ligrec(adata, cluster_key="foobar", interactions=interactions)
def test_cluster_key_is_not_categorical(self, adata: AnnData, interactions: Interactions_t):
adata.obs[_CK] = adata.obs[_CK].astype("string")
with pytest.raises(TypeError, match=rf"Expected `adata.obs\[{_CK!r}\]` to be `categorical`"):
ligrec(adata, _CK, interactions=interactions)
def test_only_1_cluster(self, adata: AnnData, interactions: Interactions_t):
adata.obs["foo"] = 1
adata.obs["foo"] = adata.obs["foo"].astype("category")
with pytest.raises(ValueError, match=r"Expected at least `2` clusters, found `1`."):
ligrec(adata, "foo", interactions=interactions)
def test_invalid_complex_policy(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid option `foobar` for `ComplexPolicy`."):
ligrec(adata, _CK, interactions=interactions, complex_policy="foobar")
def test_invalid_fdr_axis(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid option `foobar` for `CorrAxis`."):
ligrec(adata, _CK, interactions=interactions, corr_axis="foobar", corr_method="fdr_bh")
def test_too_few_permutations(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Expected `n_perms` to be positive"):
ligrec(adata, _CK, interactions=interactions, n_perms=0)
def test_invalid_interactions_type(self, adata: AnnData):
with pytest.raises(TypeError, match=r"Expected either a `pandas.DataFrame`"):
ligrec(adata, _CK, interactions=42)
def test_invalid_interactions_dict(self, adata: AnnData):
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions={"foo": ["foo"], "target": ["bar"]})
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions={"source": ["foo"], "bar": ["bar"]})
def test_invalid_interactions_dataframe(self, adata: AnnData, interactions: Interactions_t):
df = pd.DataFrame(interactions, columns=["foo", "target"])
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions=df)
df = pd.DataFrame(interactions, columns=["source", "bar"])
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions=df)
def test_interactions_invalid_sequence(self, adata: AnnData, interactions: Interactions_t):
interactions += ("foo", "bar", "bar") # type: ignore
with pytest.raises(ValueError, match=r"Not all interactions are of length `2`."):
ligrec(adata, _CK, interactions=interactions)
def test_interactions_only_invalid_names(self, adata: AnnData):
with pytest.raises(ValueError, match=r"After filtering by genes"):
ligrec(adata, _CK, interactions=["foo", "bar", "baz"])
def test_invalid_clusters(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid cluster `'foo'`."):
ligrec(adata, _CK, interactions=interactions, clusters=["foo"])
def test_invalid_clusters_mix(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Expected a `tuple` of length `2`, found `3`."):
ligrec(adata, _CK, interactions=interactions, clusters=["foo", ("bar", "baz")])
class TestValidBehavior:
def test_do_not_use_raw(self, adata: AnnData, interactions: Interactions_t):
del adata.raw
_ = PermutationTest(adata, use_raw=False)
def test_all_genes_capitalized(self, adata: AnnData, interactions: Interactions_t):
pt = PermutationTest(adata).prepare(interactions=interactions)
genes = pd.Series([g for gs in pt.interactions[["source", "target"]].values for g in gs], dtype="string")
np.testing.assert_array_equal(genes.values, genes.str.upper().values)
np.testing.assert_array_equal(pt._data.columns, pt._data.columns.str.upper())
def test_complex_policy_min(self, adata: AnnData, complexes: Complexes_t):
g = adata.raw.var_names
pt = PermutationTest(adata).prepare(interactions=complexes, complex_policy="min")
assert pt.interactions.shape == (5, 2)
assert np.mean(adata.raw[:, g[2]].X) > np.mean(adata.raw[:, g[3]].X) # S
assert np.mean(adata.raw[:, g[6]].X) < np.mean(adata.raw[:, g[7]].X) # T
assert np.mean(adata.raw[:, g[8]].X) < np.mean(adata.raw[:, g[9]].X) # S
assert np.mean(adata.raw[:, g[10]].X) > np.mean(adata.raw[:, g[11]].X) # T
np.testing.assert_array_equal(pt.interactions["source"], list(map(str.upper, [g[0], g[3], g[5], g[8], g[12]])))
np.testing.assert_array_equal(pt.interactions["target"], list(map(str.upper, [g[1], g[4], g[6], g[11], g[13]])))
def test_complex_policy_all(self, adata: AnnData, complexes: Complexes_t):
g = adata.raw.var_names
pt = PermutationTest(adata).prepare(interactions=complexes, complex_policy="all")
assert pt.interactions.shape == (10, 2)
np.testing.assert_array_equal(
pt.interactions.values,
pd.DataFrame(
[
[g[0], g[1]],
[g[2], g[4]],
[g[3], g[4]],
[g[5], g[6]],
[g[5], g[7]],
[g[8], g[10]],
[g[8], g[11]],
[g[9], g[10]],
[g[9], g[11]],
[g[12], g[13]],
]
)
.applymap(str.upper)
.values,
)
def test_fdr_axis_works(self, adata: AnnData, interactions: Interactions_t):
rc = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
corr_axis="clusters",
seed=42,
n_jobs=1,
show_progress_bar=False,
copy=True,
)
ri = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
corr_axis="interactions",
n_jobs=1,
show_progress_bar=False,
seed=42,
copy=True,
)
np.testing.assert_array_equal(np.where(np.isnan(rc["pvalues"])), np.where(np.isnan(ri["pvalues"])))
mask = np.isnan(rc["pvalues"])
assert not np.allclose(rc["pvalues"].values[mask], ri["pvalues"].values[mask])
def test_inplace_default_key(self, adata: AnnData, interactions: Interactions_t):
key = Key.uns.ligrec(_CK)
assert key not in adata.uns
res = ligrec(adata, _CK, interactions=interactions, n_perms=5, copy=False, show_progress_bar=False)
assert res is None
assert isinstance(adata.uns[key], dict)
r = adata.uns[key]
assert len(r) == 3
assert isinstance(r["means"], pd.DataFrame)
assert isinstance(r["pvalues"], pd.DataFrame)
assert isinstance(r["metadata"], pd.DataFrame)
def test_inplace_key_added(self, adata: AnnData, interactions: Interactions_t):
assert "foobar" not in adata.uns
res = ligrec(
adata, _CK, interactions=interactions, n_perms=5, copy=False, key_added="foobar", show_progress_bar=False
)
assert res is None
assert isinstance(adata.uns["foobar"], dict)
r = adata.uns["foobar"]
assert len(r) == 3
assert isinstance(r["means"], pd.DataFrame)
assert isinstance(r["pvalues"], pd.DataFrame)
assert isinstance(r["metadata"], pd.DataFrame)
def test_return_no_write(self, adata: AnnData, interactions: Interactions_t):
assert "foobar" not in adata.uns
r = ligrec(
adata, _CK, interactions=interactions, n_perms=5, copy=True, key_added="foobar", show_progress_bar=False
)
assert "foobar" not in adata.uns
assert len(r) == 3
assert isinstance(r["means"], pd.DataFrame)
assert isinstance(r["pvalues"], pd.DataFrame)
assert isinstance(r["metadata"], pd.DataFrame)
@pytest.mark.parametrize("fdr_method", [None, "fdr_bh"])
def test_pvals_in_correct_range(self, adata: AnnData, interactions: Interactions_t, fdr_method: Optional[str]):
r = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
copy=True,
show_progress_bar=False,
corr_method=fdr_method,
threshold=0,
)
if np.sum(np.isnan(r["pvalues"].values)) == np.prod(r["pvalues"].shape):
assert fdr_method == "fdr_bh"
else:
assert np.nanmax(r["pvalues"].values) <= 1.0, np.nanmax(r["pvalues"].values)
assert np.nanmin(r["pvalues"].values) >= 0, np.nanmin(r["pvalues"].values)
def test_result_correct_index(self, adata: AnnData, interactions: Interactions_t):
r = ligrec(adata, _CK, interactions=interactions, n_perms=5, copy=True, show_progress_bar=False)
np.testing.assert_array_equal(r["means"].index, r["pvalues"].index)
np.testing.assert_array_equal(r["pvalues"].index, r["metadata"].index)
np.testing.assert_array_equal(r["means"].columns, r["pvalues"].columns)
assert not np.array_equal(r["means"].columns, r["metadata"].columns)
assert not np.array_equal(r["pvalues"].columns, r["metadata"].columns)
def test_result_is_sparse(self, adata: AnnData, interactions: Interactions_t):
interactions = pd.DataFrame(interactions, columns=["source", "target"])
if TYPE_CHECKING:
assert isinstance(interactions, pd.DataFrame)
interactions["metadata"] = "foo"
r = ligrec(adata, _CK, interactions=interactions, n_perms=5, seed=2, copy=True, show_progress_bar=False)
assert r["means"].sparse.density <= 0.15
assert r["pvalues"].sparse.density <= 0.95
with pytest.raises(AttributeError, match=r"Can only use the '.sparse' accessor with Sparse data."):
_ = r["metadata"].sparse
np.testing.assert_array_equal(r["metadata"].columns, ["metadata"])
np.testing.assert_array_equal(r["metadata"]["metadata"], interactions["metadata"])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_reproducibility_cores(self, adata: AnnData, interactions: Interactions_t, n_jobs: int):
r1 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
n_jobs=n_jobs,
)
r2 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
n_jobs=n_jobs,
)
r3 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=43,
n_jobs=n_jobs,
)
assert r1 is not r2
np.testing.assert_allclose(r1["means"], r2["means"])
np.testing.assert_allclose(r2["means"], r3["means"])
np.testing.assert_allclose(r1["pvalues"], r2["pvalues"])
assert not np.allclose(r3["pvalues"], r1["pvalues"])
assert not np.allclose(r3["pvalues"], r2["pvalues"])
def test_reproducibility_numba_parallel_off(self, adata: AnnData, interactions: Interactions_t):
t1 = time()
r1 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
numba_parallel=False,
)
t1 = time() - t1
t2 = time()
r2 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
numba_parallel=True,
)
t2 = time() - t2
assert r1 is not r2
# for such a small data, overhead from parallelization is too high
assert t1 <= t2, (t1, t2)
np.testing.assert_allclose(r1["means"], r2["means"])
np.testing.assert_allclose(r1["pvalues"], r2["pvalues"])
def test_paul15_correct_means(self, paul15: AnnData, paul15_means: pd.DataFrame):
res = ligrec(
paul15,
"paul15_clusters",
interactions=list(paul15_means.index.to_list()),
corr_method=None,
copy=True,
show_progress_bar=False,
threshold=0.01,
seed=0,
n_perms=1,
n_jobs=1,
)
np.testing.assert_array_equal(res["means"].index, paul15_means.index)
np.testing.assert_array_equal(res["means"].columns, paul15_means.columns)
np.testing.assert_allclose(res["means"].values, paul15_means.values)
def test_reproducibility_numba_off(
self, adata: AnnData, interactions: Interactions_t, ligrec_no_numba: Mapping[str, pd.DataFrame]
):
r = ligrec(
adata, _CK, interactions=interactions, n_perms=5, copy=True, show_progress_bar=False, seed=42, n_jobs=1
)
np.testing.assert_array_equal(r["means"].index, ligrec_no_numba["means"].index)
np.testing.assert_array_equal(r["means"].columns, ligrec_no_numba["means"].columns)
np.testing.assert_array_equal(r["pvalues"].index, ligrec_no_numba["pvalues"].index)
np.testing.assert_array_equal(r["pvalues"].columns, ligrec_no_numba["pvalues"].columns)
np.testing.assert_allclose(r["means"], ligrec_no_numba["means"])
np.testing.assert_allclose(r["pvalues"], ligrec_no_numba["pvalues"])
np.testing.assert_array_equal(np.where(np.isnan(r["pvalues"])), np.where(np.isnan(ligrec_no_numba["pvalues"])))
def test_logging(self, adata: AnnData, interactions: Interactions_t, capsys):
s.logfile = sys.stderr
s.verbosity = 4
ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
copy=False,
show_progress_bar=False,
complex_policy="all",
key_added="ligrec_test",
n_jobs=2,
)
err = capsys.readouterr().err
assert "DEBUG: Removing duplicate interactions" in err
assert "DEBUG: Removing duplicate genes in the data" in err
assert "DEBUG: Creating all gene combinations within complexes" in err
assert "DEBUG: Removing interactions with no genes in the data" in err
assert "DEBUG: Removing genes not in any interaction" in err
assert "Running `5` permutations on `25` interactions and `25` cluster combinations using `2` core(s)" in err
assert "Adding `adata.uns['ligrec_test']`" in err
def test_non_uniqueness(self, adata: AnnData, interactions: Interactions_t):
# add complexes
expected = {(r.upper(), l.upper()) for r, l in interactions}
interactions += ( # type: ignore
(f"{interactions[-1][0]}_{interactions[-1][1]}", f"{interactions[-2][0]}_{interactions[-2][1]}"),
) * 2
interactions += interactions[:3] # type: ignore
res = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=1,
copy=True,
show_progress_bar=False,
seed=42,
numba_parallel=False,
)
assert len(res["pvalues"]) == len(expected)
assert set(res["pvalues"].index.to_list()) == expected
@pytest.mark.xfail(reason="AnnData cannot handle writing MultiIndex")
def test_writeable(self, adata: AnnData, interactions: Interactions_t, tmpdir):
ligrec(adata, _CK, interactions=interactions, n_perms=5, copy=False, show_progress_bar=False, key_added="foo")
res = adata.uns["foo"]
sc.write(tmpdir / "ligrec.h5ad", adata)
bdata = sc.read(tmpdir / "ligrec.h5ad")
for key in ["means", "pvalues", "metadata"]:
            assert_frame_equal(res[key], bdata.uns["foo"][key])
import pandas as pd
import logging
logger = logging.getLogger(f'cibi.{__file__}')
def make_dataframe(columns, dtypes, index_column=None):
# Stackoverflow-driven development (SDD) powered by
# https://stackoverflow.com/questions/36462257/create-empty-dataframe-in-pandas-specifying-column-types
assert len(columns)==len(dtypes)
df = pd.DataFrame()
for c,d in zip(columns, dtypes):
        df[c] = pd.Series(dtype=d)
    # Completion sketch -- the original file is truncated here; assumes index_column, if given,
    # names one of the created columns.
    if index_column is not None:
        df = df.set_index(index_column)
    return df
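# Usage sketch (illustrative; the column names below are made up):
#   df = make_dataframe(['episode', 'reward'], ['int64', 'float64'], index_column='episode')
#   assert df.empty and list(df.columns) == ['reward']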
import pandas as pd
def merger(input, output):
print("Merging kmercount files, this may take a while \n")
    # Completion sketch -- the original line was truncated. Assumes `input` is a list of HDF5
    # kmer-count tables and `output` is the path for the merged table (pd.read_hdf takes no
    # index_col argument, so the stored index is used as-is).
    samples = [pd.read_hdf(x) for x in input]
    pd.concat(samples, axis=1).fillna(0).to_hdf(output, key="kmercounts", mode="w")
# Standard packages
import numpy as np
import pandas as pd
import pytz
from datetime import datetime, timedelta
# sktime forecasting models
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.ets import AutoETS
from sktime.forecasting.arima import ARIMA, AutoARIMA
from sktime.forecasting.fbprophet import Prophet
# local forecasting models
from ciforecast.models.periodic_persistence import PeriodicPersistence
# Other local imports
from ciforecast.model_names import CarbonIntensityForecastModels
from ciforecast.util.util import detect_resolution
from ciforecast.carbon_intensity import VALID_ENERGY_MIX_COLUMNS, get_carbon_intensity_time_series
def _make_timezone_naive_utc(datetime_object):
""" If a given datetime is timezone-aware, convert to UTC and make timezone=naive """
if (datetime_object.tzinfo is not None) and (datetime_object.tzinfo.utcoffset(datetime_object) is not None):
# Convert to UTC
utc_time = datetime_object.astimezone(pytz.utc)
# Remove the timezone attribute to make it timezone-naive
utc_time = utc_time.replace(tzinfo=None)
return utc_time
else:
return datetime_object
def _generate_timing_params(data, start=None, end=None, custom_resolution=None):
"""
Generate some helpful parameters related to the timing of the forecast
:param data: <pandas Series or Dataframe> input data
:param start: <timestamp> start of desired forecast
:param end: <timestamp> end of desired forecast
:param custom_resolution: <datetime interval> requested custom resolution for forecast
    :return: dict as follows (where all timestamps are timezone-naive UTC-based):
        {
            'resolution': <datetime interval> resolution of the forecast,
            'timestamps': <list of int> UNIX timestamps of the forecast values that will be returned,
            'indices': <numpy array> steps-ahead indices (relative to the last input data point) that
                           cover the requested forecast period, including any gap between the end of
                           the input data and the requested forecast start,
        }
"""
# Determine resolution of forecast
if custom_resolution is not None:
resolution = custom_resolution
else:
resolution = detect_resolution(data)
# Set forecast start and end. Default behaviour is 24 hours from end of last data point.
forecast_start = (data.index[-1] + resolution) if (start is None) else start
forecast_end = (data.index[-1] + timedelta(hours=24)) if (end is None) else end
# If either start or end is timezone aware, convert to utc and make timezone naive, for consistency
forecast_start = _make_timezone_naive_utc(forecast_start)
forecast_end = _make_timezone_naive_utc(forecast_end)
# determine timestamps that will be associated with returned forecast values
timestamps = [int(pytz.utc.localize(dt).timestamp())
for dt in pd.Series(pd.date_range(forecast_start, forecast_end, freq=resolution))]
# Check whether the forecast start is same as the first step after the training data.
# If it's not the same, then that means that there is a gap before the forecast start data.
# We want to which of the "steps ahead" are the ones we need for the requested forecast period.
# For example, this could be from 5 steps ahead to 29 steps ahead, from last input data interval.
first_forecast_interval = _make_timezone_naive_utc(data.index[-1] + resolution)
if first_forecast_interval != forecast_start:
gap_size = int((forecast_start - first_forecast_interval) / resolution)
else:
gap_size = 0
forecast_size = int((forecast_end - forecast_start) / resolution) + 1
forecast_indices = np.arange(gap_size, gap_size + forecast_size)
return {
'resolution': resolution,
'timestamps': timestamps,
'indices': forecast_indices,
}
def _extract_time_series(data, time_series_name='values'):
"""
Extract specific time series from provided pandas DataFrame and return as a pandas Series
"""
# If it's already a series, just return it
if isinstance(data, pd.Series):
return data
if time_series_name not in data:
raise ValueError("Could not locate column {} in provided data".format(time_series_name))
return pd.Series(data[time_series_name])
def generate_forecast_for_single_time_series(series, model, start=None, end=None, resolution=None, params=None):
"""
Generate a forecast for a single time series.
For full parameter descriptions see `generate_forecast` below.
:return: pandas dataframe containing timestamps (index) and one columns of forecast values
"""
# Generate params related to forecast timing
timing_params = _generate_timing_params(series, start, end, resolution)
# Create forecaster. Every forecast model has some types of parameters that must be specified
# Some of these forecasting models are standard models from other packages (like sktime), others are unique
# to this package. See imports at top of this file.
# TODO: For now many of these parameters remain hard coded.
# It would be nice if they could have defaults but could also be passed in params.
seasonal_period = int(timedelta(days=1) / timing_params['resolution'])
forecaster = None
# Existing sktime models
if model == CarbonIntensityForecastModels.SEASONAL_NAIVE:
forecaster = NaiveForecaster(strategy="last", sp=seasonal_period)
elif model == CarbonIntensityForecastModels.EXPONENTIAL_SMOOTHING:
forecaster = ExponentialSmoothing(trend="add", seasonal="add", damped_trend=True, sp=seasonal_period)
elif model == CarbonIntensityForecastModels.AUTO_ETS:
forecaster = AutoETS(auto=True, sp=seasonal_period, n_jobs=-1)
elif model == CarbonIntensityForecastModels.AUTO_ARIMA:
forecaster = AutoARIMA(sp=seasonal_period, suppress_warnings=True)
elif model == CarbonIntensityForecastModels.ARIMA:
forecaster = ARIMA(order=(1, 1, 0), seasonal_order=(0, 1, 0, seasonal_period), suppress_warnings=True)
elif model == CarbonIntensityForecastModels.PROPHET:
forecaster = Prophet(
seasonality_mode="multiplicative",
n_changepoints=int(len(series) / seasonal_period),
add_country_holidays={"country_name": "Germany"},
yearly_seasonality=True,
)
# Custom models in this package
elif model == CarbonIntensityForecastModels.PERIODIC_PERSISTENCE:
forecaster = PeriodicPersistence(period=seasonal_period, num_periods=3, weights=[3, 2, 1])
# Fit data. Note that we remove timezone info since this can cause issues with some sktime models.
forecaster.fit(series.tz_convert(None))
# Generate forecast
forecast_values = forecaster.predict(timing_params['indices'])
# Reformat forecast to return the interval requested
return pd.Series(
index=timing_params['timestamps'],
data=forecast_values.values[-len(timing_params['timestamps']):]
)
def generate_forecast_from_ci(data, model, start=None, end=None, resolution=None, params=None):
"""
Generate a carbon intensity forecast using the carbon intensity data provided. For full parameter descriptions
see `generate_forecast` below.
The argument 'params' is a dict that can contain the following:
General
- 'column_name': <str> name of column to use for carbon intensity data (default None)
- TODO: add remaining param options here
:return: pandas dataframe containing timestamps (index) and one columns of forecast values
"""
if (params is None) or ('column_name' not in params):
column_name = 'carbon_intensity'
else:
column_name = params['column_name']
# Extract carbon intensity data
data_ci = _extract_time_series(data, column_name)
return generate_forecast_for_single_time_series(data_ci, model,
start=start, end=end, resolution=resolution,
params=params)
def generate_forecast_from_mix(data, model, start=None, end=None, resolution=None, params=None):
"""
Generate a carbon intensity forecast using the energy generation mix data provided. For parameter descriptions
see `generate_forecast` below.
:return: pandas dataframe containing timestamps (index) and one columns of forecast values
"""
# Get all relevant columns
column_names = []
for column_name in data:
if column_name in VALID_ENERGY_MIX_COLUMNS:
column_names.append(column_name)
# Set forecasting model for each component of energy mix
fc_models = {}
if isinstance(model, CarbonIntensityForecastModels): # if a single model is provided as argument
for column_name in column_names:
fc_models[column_name] = model
elif isinstance(model, dict): # if individual models provided for each energy mix type
fc_models = model
else:
raise ValueError("Argument `model` must be either a dict or of type CarbonIntensityForecastModels")
# TODO Should probably check here that every valid column has an associated forecast model
# TODO Currently this function does not allow for individual parameter settings for each forecasting model
# Generate all forecasts
forecasts = {}
for column_name in fc_models:
series = _extract_time_series(data, column_name)
forecasts[column_name] = generate_forecast_for_single_time_series(series, fc_models[column_name],
start=start, end=end, resolution=resolution,
params=params)
# Calculate carbon intensity forecast
forecasts['carbon_intensity'] = get_carbon_intensity_time_series(pd.DataFrame(forecasts))
# Return pandas dataframe
    return pd.DataFrame(forecasts)
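# Usage sketch (illustrative, not part of the original module): given a DataFrame `history` with a
# timezone-aware datetime index and a 'carbon_intensity' column, a default 24 h forecast from the
# last data point could be produced like this (`history` is an assumed variable name):
#   from ciforecast.model_names import CarbonIntensityForecastModels
#   ci_forecast = generate_forecast_from_ci(history, CarbonIntensityForecastModels.SEASONAL_NAIVE)
#   print(ci_forecast.head())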
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import IsolationForest
import STRING
from sklearn.preprocessing import StandardScaler
def isolation_forest(x, y, contamination=0.1, n_estimators=50, bootstrap=True, max_features=0.33, validation=[]):
if contamination == 'auto':
contamination = y.mean()
print('Contamination Automatized to: %.2f\n' % contamination)
db = IsolationForest(n_estimators=n_estimators, max_samples=500,
bootstrap=bootstrap, verbose=1, random_state=42,
contamination=contamination, max_features=max_features)
db.fit(x)
labels = db.predict(x)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print('CLUSTER NUMBERS ', n_clusters_)
print(labels)
labels = pd.DataFrame(labels, columns=['outliers'], index=y.index)
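    # IsolationForest labels inliers as 1 and outliers as -1; the two lines below remap this to a 0/1 outlier flag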
labels.loc[labels['outliers'] == 1, 'outliers'] = 0
labels.loc[labels['outliers'] == -1, 'outliers'] = 1
precision = metrics.precision_score(y.values, labels.values)
recall = metrics.recall_score(y.values, labels.values)
fbeta = metrics.fbeta_score(y.values, labels.values, beta=2)
print('PRECISION %.4f' % precision)
print('RECALL %.4f' % recall)
print('FB SCORE %.4f' % fbeta)
if validation:
        assert validation[0].shape[1] > validation[1].shape[1], 'X valid must have more columns than y valid'
predict_valid = db.predict(validation[0])
predict_valid = pd.DataFrame(predict_valid, columns=['outliers'])
predict_valid.loc[predict_valid['outliers'] == 1, 'outliers'] = 0
predict_valid.loc[predict_valid['outliers'] == -1, 'outliers'] = 1
print('PRECISION VALID %.4f' % metrics.precision_score(validation[1].values, predict_valid.values))
print('RECALL VALID %.4f' % metrics.recall_score(validation[1].values, predict_valid.values))
print('F1 SCORE VALID %.4f' % metrics.f1_score(validation[1].values, predict_valid.values))
return labels, precision, recall, fbeta
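# Usage sketch (illustrative only; `x_train`, `y_train`, `x_valid`, `y_valid` are hypothetical
# feature/label frames, not defined in this module):
#
#   labels, precision, recall, fbeta = isolation_forest(
#       x_train, y_train, contamination='auto', n_estimators=50,
#       validation=[x_valid, y_valid],
#   )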
if __name__ == '__main__':
import os
seed = 42
np.random.seed(seed)
os.chdir(STRING.path_db)
normal = | pd.read_csv('normal.csv', sep=';', encoding='latin1') | pandas.read_csv |
"""Tests for the sdv.constraints.base module."""
import warnings
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from copulas.multivariate.gaussian import GaussianMultivariate
from copulas.univariate import GaussianUnivariate
from rdt.hyper_transformer import HyperTransformer
from sdv.constraints.base import Constraint, _get_qualified_name, get_subclasses, import_object
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import ColumnFormula, UniqueCombinations
def test__get_qualified_name_class():
"""Test the ``_get_qualified_name`` function, if a class is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a class.
Input:
- A class.
Output:
- The class qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(Constraint)
# Assert
expected_name = 'sdv.constraints.base.Constraint'
assert fully_qualified_name == expected_name
def test__get_qualified_name_function():
"""Test the ``_get_qualified_name`` function, if a function is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a function.
Input:
- A function.
Output:
- The function qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(_get_qualified_name)
# Assert
expected_name = 'sdv.constraints.base._get_qualified_name'
assert fully_qualified_name == expected_name
def test_get_subclasses():
"""Test the ``get_subclasses`` function.
The ``get_subclasses`` function is expected to:
- Recursively find subclasses for the class object passed.
Setup:
- Create three classes, Parent, Child and GrandChild,
which inherit of each other hierarchically.
Input:
- The Parent class.
Output:
- Dict of the subclasses of the class: ``Child`` and ``GrandChild`` classes.
"""
# Setup
class Parent:
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
# Run
subclasses = get_subclasses(Parent)
# Assert
expected_subclasses = {
'Child': Child,
'GrandChild': GrandChild
}
assert subclasses == expected_subclasses
def test_import_object_class():
"""Test the ``import_object`` function, when importing a class.
The ``import_object`` function is expected to:
    - Import a class from its qualified name.
Input:
- Qualified name of the class.
Output:
- The imported class.
"""
# Run
obj = import_object('sdv.constraints.base.Constraint')
# Assert
assert obj is Constraint
def test_import_object_function():
"""Test the ``import_object`` function, when importing a function.
The ``import_object`` function is expected to:
- Import a function from its qualifed name.
Input:
- Qualified name of the function.
Output:
- The imported function.
"""
# Run
imported = import_object('sdv.constraints.base.import_object')
# Assert
assert imported is import_object
class TestConstraint():
    def test__identity(self):
        """Test ``Constraint._identity`` method.
``_identity`` method should return whatever it is passed.
Input:
- anything
Output:
- Input
"""
# Run
instance = Constraint('all')
output = instance._identity('input')
# Asserts
assert output == 'input'
    def test___init___transform(self):
        """Test ``Constraint.__init__`` method when 'transform' is passed.
If 'transform' is given, the ``__init__`` method should replace the ``is_valid`` method
with an identity and leave ``transform`` and ``reverse_transform`` untouched.
Input:
- transform
Side effects:
- is_valid == identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='transform')
# Asserts
assert instance.filter_valid == instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___reject_sampling(self):
"""Test ``Constraint.__init__`` method when 'reject_sampling' is passed.
If 'reject_sampling' is given, the ``__init__`` method should replace the ``transform``
and ``reverse_transform`` methods with an identity and leave ``is_valid`` untouched.
Input:
- reject_sampling
Side effects:
- is_valid != identity
- transform == identity
- reverse_transform == identity
"""
# Run
instance = Constraint(handling_strategy='reject_sampling')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform == instance._identity
assert instance.reverse_transform == instance._identity
def test___init___all(self):
"""Test ``Constraint.__init__`` method when 'all' is passed.
If 'all' is given, the ``__init__`` method should leave ``transform``,
``reverse_transform`` and ``is_valid`` untouched.
Input:
- all
Side effects:
- is_valid != identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='all')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
    def test___init___not_known(self):
        """Test ``Constraint.__init__`` method when an unknown ``handling_strategy`` is passed.
        If an unknown ``handling_strategy`` is given, a ValueError is raised.
Input:
- not_known
Side effects:
- ValueError
"""
# Run
with pytest.raises(ValueError):
Constraint(handling_strategy='not_known')
def test_fit(self):
"""Test the ``Constraint.fit`` method.
The base ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._fit = Mock()
# Run
instance.fit(table_data)
# Assert
instance._fit.assert_called_once_with(table_data)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
def test_fit_gaussian_multivariate_correct_distribution(self, gm_mock):
"""Test the ``GaussianMultivariate`` from the ``Constraint.fit`` method.
The ``GaussianMultivariate`` is expected to be called with default distribution
set as ``GaussianUnivariate``.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.assert_called_once_with(distribution=GaussianUnivariate)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
@patch('sdv.constraints.base.HyperTransformer', spec_set=HyperTransformer)
def test_fit_trains_column_model(self, ht_mock, gm_mock):
"""Test the ``Constraint.fit`` method trains the column model.
When ``fit_columns_model`` is True and there are multiple ``constraint_columns``,
the ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
- Create ``_hyper_transformer``.
- Create ``_column_model`` and train it.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.return_value.fit.assert_called_once()
calls = ht_mock.return_value.fit_transform.mock_calls
args = calls[0][1]
assert len(calls) == 1
pd.testing.assert_frame_equal(args[0], table_data)
def test_transform(self):
"""Test the ``Constraint.transform`` method.
It is an identity method for completion, to be optionally
overwritten by subclasses.
The ``Constraint.transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.transform('input')
# Assert
assert output == 'input'
def test_transform_calls__transform(self):
"""Test that the ``Constraint.transform`` method calls ``_transform``.
The ``Constraint.transform`` method is expected to:
- Return value returned by ``_transform``.
Input:
- Anything
Output:
- Result of ``_transform(input)``
"""
# Setup
constraint_mock = Mock()
constraint_mock.fit_columns_model = False
constraint_mock._transform.return_value = 'the_transformed_data'
constraint_mock._validate_columns.return_value = pd.DataFrame()
# Run
output = Constraint.transform(constraint_mock, 'input')
# Assert
assert output == 'the_transformed_data'
def test_transform_model_disabled_any_columns_missing(self):
"""Test the ``Constraint.transform`` method with invalid data.
If ``table_data`` is missing any columns and ``fit_columns_model``
is False, it should raise a ``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c']))
def test_transform_model_enabled_all_columns_missing(self):
"""Test the ``Constraint.transform`` method with missing columns.
If ``table_data`` is missing all of the ``constraint_columns`` and
``fit_columns_model`` is True, it should raise a
``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame())
def test_transform_model_enabled_some_columns_missing(self):
"""Test that the ``Constraint.transform`` method uses column model.
If ``table_data`` is missing some of the ``constraint_columns``,
the ``_column_model`` should be used to sample the rest and the
data should be transformed.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
conditions = [
pd.DataFrame([[5, 1, 2]], columns=['a', 'b', 'c']),
pd.DataFrame([[6, 3, 4]], columns=['a', 'b', 'c'])
]
transformed_conditions = [
pd.DataFrame([[1]], columns=['b']),
pd.DataFrame([[3]], columns=['b'])
]
instance._columns_model.sample.return_value = pd.DataFrame([
[1, 2, 3]
], columns=['b', 'c', 'a'])
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform.side_effect = conditions
# Run
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
transformed_data = instance.transform(data)
# Assert
        expected_transformed_data = pd.DataFrame([[1, 2, 3]], columns=['b', 'c', 'a'])
expected_result = pd.DataFrame([
[5, 1, 2],
[6, 3, 4]
], columns=['a', 'b', 'c'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 1})
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 3})
reverse_transform_calls = instance._hyper_transformer.reverse_transform.mock_calls
        pd.testing.assert_frame_equal(reverse_transform_calls[0][1][0], expected_transformed_data)
        pd.testing.assert_frame_equal(reverse_transform_calls[1][1][0], expected_transformed_data)
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling(self):
"""Test the ``Constraint.transform`` method's reject sampling.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows.
Setup:
- The ``_columns_model`` returns some valid_rows the first time,
and then the rest with the next call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])]
instance._columns_model.sample.side_effect = [
pd.DataFrame([
[1, 2],
[1, 3]
], columns=['a', 'b']),
pd.DataFrame([
[1, 4],
[1, 5],
[1, 6],
[1, 7]
], columns=['a', 'b']),
]
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform = lambda x: x
# Run
data = pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
transformed_data = instance.transform(data)
# Assert
expected_result = pd.DataFrame([
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6]
], columns=['a', 'b'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=5, conditions={'b': 1})
assert model_calls[1][2]['num_rows'] > 3
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling_error(self):
"""Test that the ``Constraint.transform`` method raises an error appropriately.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If it doesn't
get any valid rows in 100 tries, a ``ValueError`` is raised.
Setup:
- The ``_columns_model`` is fixed to always return an empty ``DataFrame``.
Input:
- Table with some missing columns.
Side Effect:
- ``ValueError`` raised.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = pd.DataFrame([[1]], columns=['b'])
instance._columns_model.sample.return_value = pd.DataFrame()
instance._hyper_transformer.transform.return_value = transformed_conditions
instance._hyper_transformer.reverse_transform.return_value = pd.DataFrame()
# Run / Assert
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
with pytest.raises(ValueError):
instance.transform(data)
def test_transform_model_enabled_reject_sampling_duplicates_valid_rows(self):
"""Test the ``Constraint.transform`` method's reject sampling fall back.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If after 100
tries, some valid rows are created but not enough, then the valid rows
are duplicated to meet the ``num_rows`` requirement.
Setup:
- The ``_columns_model`` returns some valid rows the first time, and then
        an empty ``DataFrame`` for every other call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])]
instance._columns_model.sample.side_effect = [
pd.DataFrame([
[1, 2],
[1, 3]
], columns=['a', 'b'])
] + [pd.DataFrame()] * 100
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform = lambda x: x
# Run
data = pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
transformed_data = instance.transform(data)
# Assert
expected_result = pd.DataFrame([
[1, 2],
[1, 3],
[1, 2],
[1, 3],
[1, 2]
], columns=['a', 'b'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 101
instance._columns_model.sample.assert_any_call(num_rows=5, conditions={'b': 1})
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_fit_transform(self):
"""Test the ``Constraint.fit_transform`` method.
The ``Constraint.fit_transform`` method is expected to:
- Call the ``fit`` method.
- Call the ``transform`` method.
- Return the input data unmodified.
Input:
- Anything
Output:
- self.transform output
Side Effects:
- self.fit is called with input
- self.transform is called with input
"""
# Setup
constraint_mock = Mock()
constraint_mock.transform.return_value = 'the_transformed_data'
# Run
data = 'my_data'
output = Constraint.fit_transform(constraint_mock, data)
# Assert
assert output == 'the_transformed_data'
constraint_mock.fit.assert_called_once_with('my_data')
constraint_mock.transform.assert_called_once_with('my_data')
def test_reverse_transform(self):
"""Test the ``Constraint.reverse_transform`` method. It is an identity method
for completion, to be optionally overwritten by subclasses.
The ``Constraint.reverse_transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.reverse_transform('input')
# Assert
assert output == 'input'
    def test_is_valid(self):
        """Test the ``Constraint.is_valid`` method. This should be overwritten by all the
subclasses that have a way to decide which rows are valid and which are not.
The ``Constraint.is_valid`` method is expected to:
- Say whether the given table rows are valid.
Input:
- Table data (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
# Run
instance = Constraint(handling_strategy='transform')
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_filter_valid(self):
"""Test the ``Constraint.filter_valid`` method.
The ``Constraint.filter_valid`` method is expected to:
- Filter the input data by calling the method ``is_valid``.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, with only the valid rows (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
constraint_mock = Mock()
constraint_mock.is_valid.return_value = pd.Series([True, True, False])
# Run
out = Constraint.filter_valid(constraint_mock, table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_filter_valid_with_invalid_index(self):
"""Test the ``Constraint.filter_valid`` method.
Tests when the is_valid method returns a Series with an invalid index.
Note: `is_valid.index` can be [0, 1, 5] if, for example, the Series is a subset
of an original table with 10 rows, but only rows 0/1/5 were selected.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, with only the valid rows (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
constraint_mock = Mock()
is_valid = pd.Series([True, True, False])
is_valid.index = [0, 1, 5]
constraint_mock.is_valid.return_value = is_valid
# Run
out = Constraint.filter_valid(constraint_mock, table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2]
})
| pd.testing.assert_frame_equal(expected_out, out) | pandas.testing.assert_frame_equal |
#import stuff
import numpy as np
import pandas as pd
from kneed import KneeLocator
def confusionMatrix(predicted_clone, real_label):
conf_df = pd.DataFrame(data={'vireo': predicted_clone, 'real_label': real_label})
confusion_matrix = pd.crosstab(conf_df['vireo'], conf_df['real_label'], rownames=['Predicted'], colnames=['Actual'])
#of those cases predicted to belong to class c, which fraction truly belongs to class c?
precision = np.mean(confusion_matrix.max(axis=1)/confusion_matrix.sum(axis=1))
#proportion of cases correctly identified as belonging to class c among all cases that truly belong to class c
recall = np.mean(confusion_matrix.max(axis=0)/confusion_matrix.sum(axis=0))
print('Precision = ' + str(precision))
print('Recall = ' + str(recall))
return confusion_matrix
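# Usage sketch (toy labels, for illustration only):
#
#   pred = ['clone0', 'clone0', 'clone1', 'clone1']
#   truth = ['donor_A', 'donor_A', 'donor_B', 'donor_A']
#   cm = confusionMatrix(pred, truth)  # prints precision/recall and returns the crosstab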
def plot_confusionMatrix(mat, ax, cmap = 'Blues'):
width, height = np.array(mat).shape
text_colors = ['black', 'white']
norm_conf = []
for i in np.array(mat):
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j)/float(a))
norm_conf.append(tmp_arr)
res = ax.imshow(np.array(norm_conf), cmap=cmap,
interpolation='nearest')
for x in range(width):
for y in range(height):
ax.annotate(str(np.array(mat)[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center', color=text_colors[int(norm_conf[x][y] > 0.5)])
return res
def alleleFreqMatrix(AD, DP, fillna = True):
#takes sparse AD and DP, returns dense AF matrix for plotting
AD_df = pd.DataFrame(AD.todense())
DP_df = pd.DataFrame(DP.todense())
AF_df = AD_df/DP_df
if fillna:
AF_df = AF_df.fillna(0)
return AF_df
def findKnee(BIC, sens=3):
#Wrapper function for knee point locator given a series of deltaBIC
#Remove negative BICs first
BIC = BIC[BIC > 0]
#Remove outliers first (Q3 + 1.5 IQR)
q1 = np.percentile(BIC, 25)
q3 = np.percentile(BIC, 75)
iqr = q3-q1
t = q3 + 1.5*iqr ##threshold to determine outliers
#print(t)
## if t is too small (ie. < 10k), set t to 10k
if t < 10000: t = 10000
## remove outliers
filtered_BIC = BIC[BIC <= t]
y = np.sort(filtered_BIC.astype(float))
x = np.linspace(0, 1, len(filtered_BIC)+1)[1:]
kl = KneeLocator(x, y, curve="convex", direction="increasing", S=sens)
#print(kl.knee_y)
return x, y, kl.knee, kl.knee_y
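# Usage sketch (assumes `deltaBIC` is a pandas Series of deltaBIC values, as loaded in the
# __main__ block below):
#
#   x, y, knee_x, knee_y = findKnee(deltaBIC, sens=3)
#   # knee_y can then serve as a deltaBIC cutoff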
if __name__ == '__main__':
test = | pd.read_csv('test/BIC_params.csv') | pandas.read_csv |
from typing import (
Any,
Dict,
List,
Tuple,
Union,
Mapping,
TypeVar,
Callable,
Optional,
Sequence,
)
from copy import copy
from pathlib import Path
from itertools import combinations
from collections import namedtuple, defaultdict
from anndata import AnnData
from cellrank import logging as logg
from cellrank.tl._enum import _DEFAULT_BACKEND
from cellrank.ul._docs import d
from cellrank.tl._utils import save_fig, _unique_order_preserving
from cellrank.ul.models import GAMR, BaseModel, FailedModel, SKLearnModel
from cellrank.tl._colors import _create_categorical_colors
from cellrank.ul._parallelize import parallelize
from cellrank.ul.models._base_model import ColorType
import numpy as np
import pandas as pd
from pandas.api.types import infer_dtype, is_numeric_dtype, is_categorical_dtype
import matplotlib as mpl
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
Queue = TypeVar("Queue")
Graph = TypeVar("Graph")
_ERROR_INCOMPLETE_SPEC = (
"No options were specified for {}. "
"Consider specifying a fallback model using '*'."
)
_time_range_type = Optional[Union[float, Tuple[Optional[float], Optional[float]]]]
_return_model_type = Mapping[str, Mapping[str, BaseModel]]
_input_model_type = Union[BaseModel, _return_model_type]
_callback_type = Optional[Union[Callable, Mapping[str, Mapping[str, Callable]]]]
BulkRes = namedtuple("BulkRes", ["x_test", "y_test"])
def _curved_edges(
G: Graph,
pos: Mapping,
radius_fraction: float,
dist_ratio: float = 0.2,
bezier_precision: int = 20,
polarity: str = "directed",
) -> np.ndarray:
"""
Create curved edges from a graph. Modified from: https://github.com/beyondbeneath/bezier-curved-edges-networkx.
Parameters
----------
G: :class:`networkx.Graph`
Graph for which to create curved edges.
pos
Mapping of nodes to positions.
radius_fraction
Fraction of a unit circle when self loops are present.
dist_ratio
Distance of control points of bezier curves.
bezier_precision
Number of points in the curves.
polarity
Polarity of curves, one of `'random', 'directed' or 'fixed'`.
If using `'random'`, incoming and outgoing edges may overlap.
Returns
-------
Array of shape ``(n_edges, bezier_precision, 2)`` containing the curved edges.
"""
try:
import bezier
except ImportError as e:
raise ImportError("Please install `bezier` as `pip install bezier`.") from e
# Get nodes into np array
edges = np.array(G.edges())
n_edges = edges.shape[0]
self_loop_mask = edges[:, 0] == edges[:, 1]
pos_sl = {edge[0]: pos[edge[0]] for edge in edges[self_loop_mask, ...]}
if polarity == "random":
# Random polarity of curve
rnd = np.where(np.random.randint(2, size=n_edges) == 0, -1, 1)
elif polarity == "directed":
rnd = np.where(edges[:, 0] > edges[:, 1], -1, 1)
elif polarity == "fixed":
# Create a fixed (hashed) polarity column in the case we use fixed polarity
# This is useful, e.g., for animations
rnd = np.where(
np.mod(np.vectorize(hash)(edges[:, 0]) + np.vectorize(hash)(edges[:, 1]), 2)
== 0,
-1,
1,
)
else:
raise ValueError(
f"Polarity `{polarity!r}` is not a valid option. "
f"Valid options are: `'random', 'directed' or 'fixed'`."
)
# Coordinates (x, y) of both nodes for each edge
# Note the np.vectorize method doesn't work for all node position dictionaries for some reason
u, inv = np.unique(edges, return_inverse=True)
coords = np.array([pos[x] for x in u])[inv].reshape(
[edges.shape[0], 2, edges.shape[1]]
)
coords_node1 = coords[:, 0, :]
coords_node2 = coords[:, 1, :]
# Swap node1/node2 allocations to make sure the directionality works correctly
should_swap = coords_node1[:, 0] > coords_node2[:, 0]
coords_node1[should_swap], coords_node2[should_swap] = (
coords_node2[should_swap],
coords_node1[should_swap],
)
# Distance for control points
dist = dist_ratio * np.sqrt(np.sum((coords_node1 - coords_node2) ** 2, axis=1))
# Gradients of line connecting node & perpendicular
m1 = (coords_node2[:, 1] - coords_node1[:, 1]) / (
coords_node2[:, 0] - coords_node1[:, 0]
)
m2 = -1 / m1
# Temporary points along the line which connects two nodes
t1 = dist / np.sqrt(1 + m1 ** 2)
v1 = np.array([np.ones(n_edges), m1])
coords_node1_displace = coords_node1 + (v1 * t1).T
coords_node2_displace = coords_node2 - (v1 * t1).T
# Control points, same distance but along perpendicular line
# rnd gives the 'polarity' to determine which side of the line the curve should arc
t2 = dist / np.sqrt(1 + m2 ** 2)
v2 = np.array([np.ones(len(edges)), m2])
coords_node1_ctrl = coords_node1_displace + (rnd * v2 * t2).T
coords_node2_ctrl = coords_node2_displace + (rnd * v2 * t2).T
# Combine all these four (x,y) columns into a 'node matrix'
node_matrix = np.array(
[coords_node1, coords_node1_ctrl, coords_node2_ctrl, coords_node2]
)
nums = np.linspace(0, 2 * np.pi, bezier_precision)
# Create the Bezier curves and store them in a list
self_loops = []
for p in pos_sl.values():
self_loops.append(np.c_[np.cos(nums), np.sin(nums)] * radius_fraction + p)
curveplots = []
for i in range(len(edges)):
nodes = node_matrix[:, i, :].T
curveplots.append(
bezier.Curve(nodes, degree=3)
.evaluate_multi(np.linspace(0, 1, bezier_precision))
.T
)
# Return an array of these curves
curves = np.array(curveplots)
if np.any(self_loop_mask):
curves[self_loop_mask, ...] = self_loops
return curves
def _is_any_gam_mgcv(models: Union[BaseModel, Dict[str, Dict[str, BaseModel]]]) -> bool:
"""
Return whether any models to be fit are from R's `mgcv` package.
Parameters
----------
models
Model(s) used for fitting.
Returns
-------
`True` if any of the models is from R's mgcv package, else `False`.
"""
return isinstance(models, GAMR) or (
isinstance(models, dict)
and any(isinstance(m, GAMR) for ms in models.values() for m in ms.values())
)
def _create_models(
model: _input_model_type, obs: Sequence[str], lineages: Sequence[Optional[str]]
) -> _return_model_type:
"""
Create models for each gene and lineage.
Parameters
----------
obs
Sequence of observations, such as genes.
lineages
        Sequence of lineages.
Returns
-------
The created models.
"""
def process_lineages(
obs_name: str, lin_names: Union[BaseModel, Dict[Optional[str], Any]]
):
if isinstance(lin_names, BaseModel):
# sharing the same models for all lineages
for lin_name in lineages:
models[obs_name][lin_name] = copy(lin_names)
return
if not isinstance(lin_names, dict):
raise TypeError(
f"Expected the model to be either a lineage specific `dict` or a `BaseModel`, "
f"found `{type(lin_names).__name__!r}`."
)
lin_rest_model = lin_names.get("*", None) # do not pop
if lin_rest_model is not None and not isinstance(lin_rest_model, BaseModel):
raise TypeError(
f"Expected the lineage fallback model for gene `{obs_name!r}` to be of type `BaseModel`, "
f"found `{type(lin_rest_model).__name__!r}`."
)
for lin_name, mod in lin_names.items():
if lin_name == "*":
continue
if not isinstance(mod, BaseModel):
raise TypeError(
f"Expected the model for gene `{obs_name!r}` and lineage `{lin_name!r}` "
f"to be of type `BaseModel`, found `{type(mod).__name__!r}`."
)
models[obs_name][lin_name] = copy(mod)
if set(models[obs_name].keys()) & lineages == lineages:
return
if lin_rest_model is not None:
for lin_name in lineages - set(models[obs_name].keys()):
models[obs_name][lin_name] = copy(lin_rest_model)
else:
raise ValueError(
_ERROR_INCOMPLETE_SPEC.format(f"all lineages for gene `{obs_name!r}`")
)
if not len(lineages):
raise ValueError("No lineages have been selected.")
if not len(obs):
raise ValueError("No genes have been selected.")
if isinstance(model, BaseModel):
return {
o: {lin: copy(model) for lin in _unique_order_preserving(lineages)}
for o in _unique_order_preserving(obs)
}
lineages, obs = (
set(_unique_order_preserving(lineages)),
set(_unique_order_preserving(obs)),
)
models = defaultdict(dict)
if isinstance(model, dict):
obs_rest_model = model.pop("*", None)
if obs_rest_model is not None and not isinstance(obs_rest_model, BaseModel):
raise TypeError(
f"Expected the gene fallback model to be of type `BaseModel`, "
f"found `{type(obs_rest_model).__name__!r}`."
)
for obs_name, lin_names in model.items():
process_lineages(obs_name, lin_names)
if obs_rest_model is not None:
for obs_name in obs - set(model.keys()):
process_lineages(obs_name, model.get(obs_name, obs_rest_model))
elif set(model.keys()) != obs:
raise ValueError(
_ERROR_INCOMPLETE_SPEC.format(
f"genes `{list(obs - set(model.keys()))}`."
)
)
else:
        raise TypeError(
            f"Class `{type(model).__name__!r}` must be of type `BaseModel` or "
            f"a gene and lineage specific `dict` of `BaseModel`."
        )
if set(models.keys()) & obs != obs:
raise ValueError(
f"Missing gene models for the following genes: `{list(obs - set(models.keys()))}`."
)
for gene, vs in models.items():
if set(vs.keys()) & lineages != lineages:
raise ValueError(
f"Missing lineage models for the gene `{gene!r}`: `{list(lineages - set(vs.keys()))}`."
)
return models
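# Illustrative `model` specification accepted by `_create_models` (gene/lineage names are
# hypothetical and `GAM` stands in for any `BaseModel` subclass):
#
#   model = {
#       "GeneA": GAM(adata),                              # shared across all lineages
#       "GeneB": {"Alpha": GAM(adata), "*": GAM(adata)},  # per-lineage, with '*' fallback
#       "*": GAM(adata),                                  # fallback for remaining genes
#   }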
def _fit_bulk_helper(
genes: Sequence[str],
models: _input_model_type,
callbacks: _callback_type,
lineages: Sequence[Optional[str]],
time_range: Sequence[Union[float, Tuple[float, float]]],
return_models: bool = False,
queue: Optional[Queue] = None,
**kwargs,
) -> Dict[str, Dict[str, BaseModel]]:
"""
Fit model for given genes and lineages.
Parameters
----------
genes
Genes for which to fit the models.
models
Gene and lineage specific models.
callbacks
Gene and lineage specific prepare callbacks.
lineages
Lineages for which to fit the models.
time_range
Minimum and maximum pseudotimes.
return_models
Whether to return the full models or just tuple ``(x_test, y_test)``.
queue
Signalling queue in the parent process/thread used to update the progress bar.
kwargs
Keyword arguments for :func:`cellrank.ul.models.BaseModel.prepare`.
Returns
-------
The fitted models, optionally containing the confidence interval in the form of
`{'gene1': {'lineage1': <model11>, ...}, ...}`.
If any step has failed, the model will be of type :class:`cellrank.ul.models.FailedModel`.
"""
if len(lineages) != len(time_range):
raise ValueError(
f"Expected `lineage` and `time_range` to be of same length, "
f"found `{len(lineages)}` != `{len(time_range)}`."
)
conf_int = return_models and kwargs.pop("conf_int", False)
res = {}
for gene in genes:
res[gene] = {}
for ln, tr in zip(lineages, time_range):
cb = callbacks[gene][ln]
model = models[gene][ln]
model._is_bulk = True
model = cb(model, gene=gene, lineage=ln, time_range=tr, **kwargs)
model = model.fit()
# GAMR is a bit faster if we don't need the conf int
# if it's needed, `.predict` will calculate it and `confidence_interval` will do nothing
if not conf_int:
model.predict()
elif _is_any_gam_mgcv(model):
model.predict(level=conf_int if isinstance(conf_int, float) else 0.95)
else:
model.predict()
model.confidence_interval()
res[gene][ln] = (
model if return_models else BulkRes(model.x_test, model.y_test)
)
if queue is not None:
queue.put(1)
if queue is not None:
queue.put(None)
return res
def _fit_bulk(
models: Mapping[str, Mapping[str, Callable]],
callbacks: Mapping[str, Mapping[str, Callable]],
genes: Union[str, Sequence[str]],
lineages: Union[str, Sequence[str]],
time_range: _time_range_type,
parallel_kwargs: dict,
return_models: bool = False,
filter_all_failed: bool = True,
**kwargs,
) -> Tuple[_return_model_type, _return_model_type, Sequence[str], Sequence[str]]:
"""
Fit models for given genes and lineages.
Parameters
----------
models
Gene and lineage specific estimators.
callbacks
        Functions which are called to prepare the ``models``.
genes
Genes for which to fit the ``models``.
lineages
Lineages for which to fit the ``models``.
time_range
Possibly ``lineages`` specific start- and endtimes.
parallel_kwargs
Keyword arguments for :func:`cellrank.ul._utils.parallelize`.
return_models
Whether to return the full models or just a dictionary of dictionaries of :class:`collections.namedtuple`,
`(x_test, y_test)`. This is highly discouraged because no meaningful error messages will be produced.
filter_all_failed
Whether to filter out all models which have failed.
Returns
-------
All the models, including the failed ones. It is a nested dictionary where keys are the ``genes`` and the values
is again a :class:`dict`, where keys are ``lineages`` and values are the failed or fitted models or
the :class:`collections.namedtuple`, based on ``return_models = True``.
Same as above, but can contain failed models if ``filter_all_failed=False``. In that case, it is guaranteed
that this dictionary will contain only genes which have been successfully fitted for at least 1 lineage.
If ``return_models = True``, the models are just a :class:`collections.namedtuple` of `(x_test, y_test)`.
All the genes of the filtered models.
All the lineage of the filtered models.
"""
if isinstance(genes, str):
genes = [genes]
if isinstance(lineages, str):
lineages = [lineages]
if isinstance(time_range, (tuple, float, int, type(None))):
time_range = [time_range] * len(lineages)
elif len(time_range) != len(lineages):
raise ValueError(
f"Expected time ranges to be of length `{len(lineages)}`, found `{len(time_range)}`."
)
n_jobs = parallel_kwargs.pop("n_jobs", 1)
start = logg.info(f"Computing trends using `{n_jobs}` core(s)")
models = parallelize(
_fit_bulk_helper,
genes,
unit="gene" if kwargs.get("data_key", "gene") != "obs" else "obs",
n_jobs=n_jobs,
extractor=lambda modelss: {k: v for m in modelss for k, v in m.items()},
)(
models=models,
callbacks=callbacks,
lineages=lineages,
time_range=time_range,
return_models=return_models,
**kwargs,
)
logg.info(" Finish", time=start)
return _filter_models(
models, return_models=return_models, filter_all_failed=filter_all_failed
)
def _filter_models(
models, return_models: bool = False, filter_all_failed: bool = True
) -> Tuple[_return_model_type, _return_model_type, Sequence[str], Sequence[str]]:
def is_valid(x: Union[BaseModel, BulkRes]) -> bool:
if return_models:
assert isinstance(
x, BaseModel
), f"Expected `BaseModel`, found `{type(x).__name__!r}`."
return bool(x)
return (
x.x_test is not None
and x.y_test is not None
and np.all(np.isfinite(x.y_test))
)
modelmat = pd.DataFrame(models).T
modelmask = modelmat.applymap(is_valid)
to_keep = modelmask[modelmask.any(axis=1)]
to_keep = to_keep.loc[:, to_keep.any(axis=0)].T
filtered_models = {
gene: {
ln: models[gene][ln]
for ln in (
ln
for ln in v.keys()
if (is_valid(models[gene][ln]) if filter_all_failed else True)
)
}
for gene, v in to_keep.to_dict().items()
}
if not len(filtered_models):
if not return_models:
raise RuntimeError(
"Fitting has failed for all gene/lineage combinations. "
"Specify `return_models=True` for more information."
)
for ms in models.values():
for model in ms.values():
assert isinstance(
model, FailedModel
), f"Expected `FailedModel`, found `{type(model).__name__!r}`."
model.reraise()
if not np.all(modelmask.values):
failed_models = modelmat.values[~modelmask.values]
        logg.warning(
            f"Unable to fit `{len(failed_models)}` models."
            + (
                ""
                if return_models
                else " Consider specifying `return_models=True` for further inspection."
            )
        )
logg.debug(
"The failed models were:\n`{}`".format(
"\n".join(f" {m}" for m in failed_models)
)
)
# lineages is the max number of lineages
return models, filtered_models, tuple(filtered_models.keys()), tuple(to_keep.index)
@d.dedent
def _trends_helper(
models: Dict[str, Dict[str, Any]],
gene: str,
transpose: bool = False,
lineage_names: Optional[Sequence[str]] = None,
same_plot: bool = False,
sharey: Union[str, bool] = False,
show_ylabel: bool = True,
show_lineage: Union[bool, np.ndarray] = True,
show_xticks_and_label: Union[bool, np.ndarray] = True,
lineage_cmap: Optional[Union[mpl.colors.ListedColormap, Sequence]] = None,
lineage_probability_color: Optional[str] = None,
abs_prob_cmap=cm.viridis,
gene_as_title: bool = False,
cell_color: Optional[str] = None,
legend_loc: Optional[str] = "best",
fig: mpl.figure.Figure = None,
axes: Union[mpl.axes.Axes, Sequence[mpl.axes.Axes]] = None,
**kwargs: Any,
) -> None:
"""
Plot an expression gene for some lineages.
Parameters
----------
%(adata)s
%(model)s
gene
Name of the gene in `adata.var_names``.
ln_key
Key in ``adata.obsm`` where to find the lineages.
lineage_names
Names of lineages to plot.
same_plot
Whether to plot all lineages in the same plot or separately.
sharey
Whether the y-axis is being shared.
show_ylabel
Whether to show y-label on the y-axis. Usually, only the first column will contain the y-label.
show_lineage
Whether to show the lineage as the title. Usually, only first row will contain the lineage names.
show_xticks_and_label
Whether to show x-ticks and x-label. Usually, only the last row will show this.
lineage_cmap
        Colormap to use when coloring the lineage. When ``transpose``, this corresponds to the color of genes.
lineage_probability_color
Actual color of 1 ``lineage``. Only used when ``same_plot=True`` and ``transpose=True`` and
``lineage_probability=True``.
abs_prob_cmap:
Colormap to use when coloring in the absorption probabilities, if they are being plotted.
gene_as_title
Whether to use the gene names as titles (with lineage names as well) or on the y-axis.
legend_loc
Location of the legend. If `None`, don't show any legend.
fig
Figure to use.
ax
Ax to use.
kwargs
Keyword arguments for :meth:`cellrank.ul.models.BaseModel.plot`.
Returns
-------
%(just_plots)s
"""
n_lineages = len(lineage_names)
if same_plot:
axes = [axes] * len(lineage_names)
axes = np.ravel(axes)
percs = kwargs.pop("perc", None)
if percs is None or not isinstance(percs[0], (tuple, list)):
percs = [percs]
same_perc = False # we need to show colorbar always if percs differ
if len(percs) != n_lineages or n_lineages == 1:
if len(percs) != 1:
raise ValueError(
f"Percentile must be a collection of size `1` or `{n_lineages}`, got `{len(percs)}`."
)
same_perc = True
percs = percs * n_lineages
hide_cells = kwargs.pop("hide_cells", False)
show_cbar = kwargs.pop("cbar", True)
show_prob = kwargs.pop("lineage_probability", False)
if same_plot:
if not transpose:
lineage_colors = (
lineage_cmap.colors
if lineage_cmap is not None and hasattr(lineage_cmap, "colors")
else lineage_cmap
)
else:
# this should be fine w.r.t. to the missing genes, since they are in the same order AND
# we're also passing the failed models (this is important)
# these are actually gene colors
if lineage_cmap is not None:
lineage_colors = (
lineage_cmap.colors
if hasattr(lineage_cmap, "colors")
else [c for _, c in zip(lineage_names, lineage_cmap)]
)
else:
lineage_colors = _create_categorical_colors(n_lineages)
else:
lineage_colors = (
("black" if not mcolors.is_color_like(lineage_cmap) else lineage_cmap),
) * n_lineages
if n_lineages > len(lineage_colors):
raise ValueError(
f"Expected at least `{n_lineages}` colors, found `{len(lineage_colors)}`."
)
lineage_color_mapper = {ln: lineage_colors[i] for i, ln in enumerate(lineage_names)}
successful_models = {
ln: models[gene][ln] for ln in lineage_names if models[gene][ln]
}
if show_prob and same_plot:
minns, maxxs = zip(
*(
models[gene][n]._return_min_max(
show_conf_int=kwargs.get("conf_int", False),
)
for n in lineage_names
)
)
minn, maxx = min(minns), max(maxxs)
kwargs["loc"] = legend_loc
kwargs["scaler"] = lambda x: (x - minn) / (maxx - minn)
else:
kwargs["loc"] = None
if isinstance(show_xticks_and_label, bool):
show_xticks_and_label = [show_xticks_and_label] * len(lineage_names)
elif len(show_xticks_and_label) != len(lineage_names):
raise ValueError(
f"Expected `show_xticks_label` to be the same length as `lineage_names`, "
f"found `{len(show_xticks_and_label)}` != `{len(lineage_names)}`."
)
if isinstance(show_lineage, bool):
show_lineage = [show_lineage] * len(lineage_names)
elif len(show_lineage) != len(lineage_names):
raise ValueError(
f"Expected `show_lineage` to be the same length as `lineage_names`, "
f"found `{len(show_lineage)}` != `{len(lineage_names)}`."
)
last_ax = None
ylabel_shown = False
cells_shown = False
obs_legend_loc = kwargs.pop("obs_legend_loc", "best")
for i, (name, ax, perc) in enumerate(zip(lineage_names, axes, percs)):
model = models[gene][name]
if isinstance(model, FailedModel):
if not same_plot:
ax.remove()
continue
if same_plot:
if gene_as_title:
title = gene
ylabel = "expression" if show_ylabel else None
else:
title = ""
ylabel = gene
else:
if gene_as_title:
title = None
ylabel = "expression" if not ylabel_shown else None
else:
title = (
(name if name is not None else "no lineage")
if show_lineage[i]
else ""
)
ylabel = gene if not ylabel_shown else None
model.plot(
ax=ax,
fig=fig,
perc=perc,
cell_color=cell_color,
cbar=False,
obs_legend_loc=None,
title=title,
hide_cells=True if hide_cells else cells_shown if same_plot else False,
same_plot=same_plot,
lineage_color=lineage_color_mapper[name],
lineage_probability_color=lineage_probability_color,
abs_prob_cmap=abs_prob_cmap,
lineage_probability=show_prob,
ylabel=ylabel,
**kwargs,
)
if sharey in ("row", "all", True) and not ylabel_shown:
plt.setp(ax.get_yticklabels(), visible=True)
if show_xticks_and_label[i]:
plt.setp(ax.get_xticklabels(), visible=True)
else:
ax.set_xlabel(None)
last_ax = ax
ylabel_shown = True
cells_shown = True
key, color, typp, mapper = model._get_colors(cell_color, same_plot=same_plot)
if typp == ColorType.CAT:
if not hide_cells:
model._maybe_add_legend(
fig, ax, mapper=mapper, title=key, loc=obs_legend_loc, is_line=False
)
elif typp == ColorType.CONT:
if same_perc and show_cbar and not hide_cells:
if isinstance(color, np.ndarray):
# plotting cont. observation other than lin. probs as a color
vmin = np.min(color)
vmax = np.max(color)
else:
vmin = np.min([model.w_all for model in successful_models.values()])
vmax = np.max([model.w_all for model in successful_models.values()])
norm = mcolors.Normalize(vmin=vmin, vmax=vmax)
for ax in axes:
children = [
c
for c in ax.get_children()
if isinstance(c, mpl.collections.PathCollection)
]
if len(children):
children[0].set_norm(norm)
divider = make_axes_locatable(last_ax)
cax = divider.append_axes("right", size="2%", pad=0.1)
_ = mpl.colorbar.ColorbarBase(
cax,
norm=norm,
cmap=abs_prob_cmap,
label=key,
ticks=np.linspace(norm.vmin, norm.vmax, 5),
)
if same_plot and lineage_names != [None]:
model._maybe_add_legend(
fig,
ax,
mapper={ln: lineage_color_mapper[ln] for ln in successful_models.keys()},
loc=legend_loc,
)
def _position_legend(ax: mpl.axes.Axes, legend_loc: str, **kwargs) -> mpl.legend.Legend:
"""
Position legend in- or outside the figure.
Parameters
----------
ax
Ax where to position the legend.
legend_loc
Position of legend.
kwargs
Keyword arguments for :func:`matplotlib.pyplot.legend`.
Returns
-------
The created legend.
"""
if legend_loc == "center center out":
raise ValueError("Invalid option: `'center center out'`.")
if legend_loc == "best":
return ax.legend(loc="best", **kwargs)
tmp, loc = legend_loc.split(" "), ""
if len(tmp) == 1:
height, rest = tmp[0], []
width = "right" if height in ("upper", "top", "center") else "left"
else:
height, width, *rest = legend_loc.split(" ")
if rest:
if len(rest) != 1:
raise ValueError(
f"Expected only 1 additional modifier ('in' or 'out'), found `{list(rest)}`."
)
elif rest[0] not in ("in", "out"):
raise ValueError(
f"Invalid modifier `{rest[0]!r}`. Valid options are: `'in', 'out'`."
)
if rest[0] == "in": # ignore in, it's default
rest = []
if height in ("upper", "top"):
y = 1.55 if width == "center" else 1.025
loc += "upper"
elif height == "center":
y = 0.5
loc += "center"
elif height in ("lower", "bottom"):
y = -0.55 if width == "center" else -0.025
loc += "lower"
else:
raise ValueError(
f"Invalid legend position on y-axis: `{height!r}`. "
f"Valid options are: `'upper', 'top', 'center', 'lower', 'bottom'`."
)
if width == "left":
x = -0.05
loc += " right" if rest else " left"
elif width == "center":
x = 0.5
if height != "center": # causes to be like top center
loc += " center"
elif width == "right":
x = 1.05
loc += " left" if rest else " right"
else:
raise ValueError(
f"Invalid legend position on x-axis: `{width!r}`. "
f"Valid options are: `'left', 'center', 'right'`."
)
if rest:
kwargs["bbox_to_anchor"] = (x, y)
return ax.legend(loc=loc, **kwargs)
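# Illustrative `legend_loc` values understood by `_position_legend` (derived from the parsing above):
#
#   _position_legend(ax, "best")             # matplotlib default placement
#   _position_legend(ax, "lower left")       # inside the axes
#   _position_legend(ax, "upper right out")  # anchored outside the axes, upper right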
def _get_backend(model, backend: str) -> str:
return _DEFAULT_BACKEND if _is_any_gam_mgcv(model) else backend
@d.dedent
def _create_callbacks(
adata: AnnData,
callback: Optional[Callable],
obs: Sequence[str],
lineages: Sequence[Optional[str]],
perform_sanity_check: Optional[bool] = None,
**kwargs,
) -> Dict[str, Dict[str, Callable]]:
"""
Create models for each gene and lineage.
Parameters
----------
%(adata)s
callback
Gene and lineage specific prepare callbacks.
obs
Sequence of observations, such as genes.
lineages
        Sequence of lineages.
perform_sanity_check
Whether to check if all callbacks have the correct signature. This is done by instantiating
dummy model and running the function. We're assuming that the callback isn't really a pricey operation.
If `None`, it is only performed for non-default callbacks.
kwargs
Keyword arguments for ``callback`` when performing the sanity check.
Returns
-------
The created callbacks.
"""
def process_lineages(
obs_name: str, lin_names: Optional[Union[Callable, Dict[Optional[str], Any]]]
) -> None:
if lin_names is None:
lin_names = _default_model_callback
if callable(lin_names):
# sharing the same models for all lineages
for lin_name in lineages:
callbacks[obs_name][lin_name] = lin_names
return
elif not isinstance(lin_names, dict):
raise TypeError(
f"Expected the lineage callback to be either `callable` or a dictionary of callables, "
f"found `{type(lin_names).__name__!r}`."
)
lin_rest_callback = (
lin_names.get("*", _default_model_callback) or _default_model_callback
) # do not pop
if not callable(lin_rest_callback):
raise TypeError(
f"Expected the lineage fallback callback for gene `{obs_name!r}` to be `callable`, "
f"found `{type(lin_rest_callback).__name__!r}`."
)
for lin_name, cb in lin_names.items():
if lin_name == "*":
continue
if not callable(cb):
raise TypeError(
f"Expected the callback for gene `{obs_name!r}` and lineage `{lin_name!r}` "
f"to be `callable`, found `{type(cb).__name__!r}`."
)
callbacks[obs_name][lin_name] = cb
for lin_name in lineages - set(callbacks[obs_name].keys()):
callbacks[obs_name][lin_name] = lin_rest_callback
def maybe_sanity_check(callbacks: Dict[str, Dict[str, Callable]]) -> None:
if not perform_sanity_check:
return
from sklearn.svm import SVR
logg.debug("Performing callback sanity checks")
for gene in callbacks.keys():
for lineage, cb in callbacks[gene].items():
# create the model here because the callback can search the attribute
dummy_model = SKLearnModel(adata, model=SVR())
try:
model = cb(dummy_model, gene=gene, lineage=lineage, **kwargs)
assert model is dummy_model, (
"Creation of new models is not allowed. "
"Ensure that callback returns the same model."
)
assert (
model.prepared
), "Model is not prepared. Ensure that callback calls `.prepare()`."
assert (
model._gene == gene
), f"Callback modified the gene from `{gene!r}` to `{model._gene!r}`."
assert (
model._lineage == lineage
), f"Callback modified the lineage from `{lineage!r}` to `{model._lineage!r}`."
if isinstance(model, FailedModel):
model.reraise()
except Exception as e: # noqa: B902
raise RuntimeError(
f"Callback validation failed for gene `{gene!r}` and lineage `{lineage!r}`."
) from e
def all_callbacks_are_default(cbs: dict) -> bool:
# this correctly implicitly handles '*': None
for vs in cbs.values():
if isinstance(vs, dict):
for cb in vs.values():
if callable(cb) and cb is not _default_model_callback:
return False
elif callable(vs) and vs is not _default_model_callback:
return False
return True
if not len(lineages):
raise ValueError("No lineages have been selected.")
if not len(obs):
raise ValueError("No genes have been selected.")
if callback is None:
callback = _default_model_callback
if perform_sanity_check is None:
perform_sanity_check = (
not all_callbacks_are_default(callback)
if isinstance(callback, dict)
else callback is not _default_model_callback
)
if callable(callback):
callbacks = {o: {lin: callback for lin in lineages} for o in obs}
maybe_sanity_check(callbacks)
return callbacks
lineages, obs = (
set(_unique_order_preserving(lineages)),
set(_unique_order_preserving(obs)),
)
callbacks = defaultdict(dict)
if isinstance(callback, dict):
# can be specified as None
obs_rest_callback = (
callback.pop("*", _default_model_callback) or _default_model_callback
)
for obs_name, lin_names in callback.items():
process_lineages(obs_name, lin_names)
if callable(obs_rest_callback):
for obs_name in obs - set(callback.keys()):
process_lineages(obs_name, callback.get(obs_name, obs_rest_callback))
else:
raise TypeError(
f"Expected the gene fallback callback to be `callable`, "
f"found `{type(obs_rest_callback).__name__!r}`."
)
else:
raise TypeError(
f"Class `{type(callback).__name__!r}` must be `callable` or "
f"a gene and lineage specific `dict` of `callables`."
)
if set(callbacks.keys()) & obs != obs:
raise ValueError(
f"Missing gene callbacks for the following genes: `{list(obs - set(callbacks.keys()))}`."
)
for gene, vs in callbacks.items():
if set(vs.keys()) & lineages != lineages:
raise ValueError(
f"Missing lineage callbacks for gene `{gene!r}`: `{list(lineages - set(vs.keys()))}`."
)
maybe_sanity_check(callbacks)
return callbacks
def _default_model_callback(model: BaseModel, **kwargs) -> BaseModel:
# we could filter kwargs, but it's better not to - this will detect if we pass useless stuff
return model.prepare(**kwargs)
@d.dedent
def composition(
adata: AnnData,
key: str,
fontsize: Optional[str] = None,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[float] = None,
save: Optional[Union[str, Path]] = None,
**kwargs: Any,
) -> None:
"""
Plot a pie chart for categorical annotation.
Parameters
----------
%(adata)s
key
Key in :attr:`anndata.AnnData.obs` containing categorical observation.
fontsize
Font size for the pie chart labels.
%(plotting)s
kwargs
Keyword arguments for :func:`matplotlib.pyplot.pie`.
Returns
-------
%(just_plots)s
"""
if key not in adata.obs:
raise KeyError(f"Data not found in `adata.obs[{key!r}]`.")
if not is_categorical_dtype(adata.obs[key]):
        raise TypeError(
            f"Expected `adata.obs[{key!r}]` to be `categorical`, "
f"found `{ | infer_dtype(adata.obs[key]) | pandas.api.types.infer_dtype |
from numpy.random import default_rng
import numpy as np
import emcee
import pandas as pd
from tqdm.auto import tqdm
from sklearn.preprocessing import StandardScaler
import copy
from scipy.stats import norm, ortho_group
import random
import math
import scipy.stats as ss
"""
A collection of synthetic data generators, including multivariate normal data, data generated with Archimedean copulas,
data generated with arbitrary marginals and a Gaussian copula, and data from already existing drift generators.
"""
rng = default_rng()
# three available archimedean copulas
def clayton(theta, n):
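    # Marshall-Olkin style sampling: draw V ~ Gamma(1/theta, 1), then push i.i.d. exponentials
    # through the inverse generator psi(t) = (1 + t)^(-1/theta) to get uniforms with Clayton dependence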
v = random.gammavariate(1/theta, 1)
uf = [random.expovariate(1)/v for _ in range(n)]
return [(k+1)**(-1.0/theta) for k in uf]
def amh(theta, n):
# NOTE: Use SciPy RNG for convenience here
v = ss.geom(1-theta).rvs()
uf = [random.expovariate(1)/v for _ in range(n)]
return [(1-theta)/(math.exp(k)-theta) for k in uf]
def frank(theta, n):
v = ss.logser(1-math.exp(-theta)).rvs()
uf = [random.expovariate(1)/v for _ in range(n)]
    # inverse generator of the Frank copula: psi(t) = -ln(1 - (1 - e^(-theta)) * e^(-t)) / theta
    return [-math.log(1-(1-math.exp(-theta))*math.exp(-k))/theta for k in uf]
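# Usage sketch: each sampler returns one n-dimensional observation with Uniform(0, 1) margins,
# so a sample is built by repeated calls (theta values are arbitrary examples):
#
#   clayton_sample = np.array([clayton(2.0, 2) for _ in range(1000)])
#   frank_sample = np.array([frank(5.0, 2) for _ in range(1000)])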
def new_distribution_cholesky(pre_mean, ch_mean, perturbation=0.1):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = 10000
var = None
while cond > 1000:
chol = ortho_group.rvs(len(pre_mean))
var = [email protected]
cond = np.linalg.cond(var)
return pre_mean, var
def new_similar_distribution_cholesky(pre_mean, pre_chol, ch_mean, perturbation=0.1):
"""Problematic, as the resulting cov matrix is almost diagonal!"""
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation # not to change the mean too much
cond = 10000
var = None
while cond > 1000:
chol = pre_chol + np.random.uniform(0, perturbation, (len(pre_mean), len(pre_mean)))
chol = nearest_orthogonal_matrix(chol)
var = [email protected]
cond = np.linalg.cond(var)
return pre_mean, var
def new_distribution_svd(pre_mean, ch_mean, perturbation=0.1, conditioning=1000):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = conditioning*100*len(pre_mean)
var = None
while cond > conditioning*10*len(pre_mean) or cond < conditioning*len(pre_mean):
nums = np.random.uniform(0, 1, len(pre_mean)) # change eigenvalues distribution
corr = ss.random_correlation.rvs(nums/sum(nums)*len(pre_mean), random_state=rng)
S = np.diag(np.random.uniform(0, 1, len(pre_mean)))
var = S.T@corr@S
cond = np.linalg.cond(var)
return pre_mean, var
def new_similar_distribution_svd(pre_mean, pre_nums, pre_S, ch_mean, perturbation=0.02):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = 10000*len(pre_mean)
var = None
while cond > 1000*len(pre_mean) or cond < 10*len(pre_mean):
nums = pre_nums + np.random.uniform(0, perturbation, len(pre_mean))
corr = ss.random_correlation.rvs(nums/sum(nums)*len(pre_mean), random_state=rng)
S = pre_S + np.diag(np.random.uniform(0, perturbation/2, len(pre_mean)))
var = S.T@corr@S
cond = np.linalg.cond(var)
return pre_mean, var
def new_distribution(pre_mean, pre_cov, ch_mean, ch_cov, change_X=True, change_y=True):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
# important! A complete mask for cov has to be passed, but only the upper triangular part will be considered
if change_y and change_X:
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5 # not to change the mean too much
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
if not np.all(np.linalg.eigvals(pre_cov) > 0):
pre_cov = nearestPD(pre_cov)
elif change_X:
pre_mean_old = pre_mean
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5 # not to change the mean too much
pre_mean[-1] = pre_mean_old[-1]
pre_cov_old = pre_cov
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov[-1][-1] = pre_cov_old[-1][-1]
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
pre_cov_[-1][-1] = pre_cov_old[-1][-1]
pre_cov = pre_cov_
    else:  # for now I don't need the case where only y changes
n_dim = len(pre_cov)
ch_cov = np.array([[False] * int(n_dim)] * int(n_dim), dtype=bool)
ch_cov[:, -1] = [True] * (n_dim-1) + [False]
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov_old = pre_cov
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
            # I add a small perturbation to P(X) too; otherwise I cannot change P(Y|X) without the cov matrix becoming singular
pre_cov_[np.invert(ch_cov)] = pre_cov_old[np.invert(ch_cov)]+np.random.normal(size=sum(sum(np.invert(ch_cov))))/20
pre_cov_ = np.tril(pre_cov_.T) + np.triu(pre_cov_, 1)
pre_cov = pre_cov_
return pre_mean, pre_cov
def new_similar_distribution(pre_mean, pre_cov, ch_mean, ch_cov, change_X=True, change_y=True):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
# important! A complete mask for cov has to be passed, but only the upper triangular part will be considered
    # new_similar_distribution, as of now, only permits data drift + covariate drift together, unlike the
    # abrupt case, where the two can be separated and simulated independently
if change_y:
pre_mean[ch_mean] = pre_mean[ch_mean] + rng.uniform(-0.1, 0.1, size=sum(ch_mean))
pre_cov[ch_cov] = np.reshape(pre_cov[ch_cov], -1) + rng.uniform(-0.1, 0.1, sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
if not np.all(np.linalg.eigvals(pre_cov) > 0):
pre_cov = nearestPD(pre_cov)
else:
pre_mean_old = pre_mean
pre_mean[ch_mean] = pre_mean[ch_mean] + rng.uniform(-0.1, 0.1, size=sum(ch_mean))
pre_mean[-1] = pre_mean_old[-1]
pre_cov_old = pre_cov
pre_cov[ch_cov] = np.reshape(pre_cov[ch_cov], -1) + rng.uniform(-0.1, 0.1, sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov[-1][-1] = pre_cov_old[-1][-1]
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
pre_cov_[-1][-1] = pre_cov_old[-1][-1]
pre_cov = pre_cov_
return pre_mean, pre_cov
def new_distribution_deprecated(pre_mean, pre_cov, ch_mean, ch_cov):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5
pre_cov[ch_cov] = np.random.random((sum(ch_cov),len(pre_mean)))
pre_cov = nearestPD(pre_cov)
return pre_mean, pre_cov
def lnprob_trunc_norm(x, mean, n_dim, C):
    if sum(x) > 0 * n_dim:
        return -np.inf
    else:
        return -0.5 * (x - mean).dot(np.linalg.inv(C)).dot(x - mean)
def truncated_normal_sampling(pre_mean, pre_cov, size, n_dim):
if size <= 0:
return None
if size >= n_dim*2:
pos = emcee.utils.sample_ball(pre_mean, np.sqrt(np.diag(pre_cov)), size=size)
else:
pos = rng.multivariate_normal(pre_mean, pre_cov, size=size)
S = emcee.EnsembleSampler(size, n_dim, lnprob_trunc_norm, args=(pre_mean, n_dim, pre_cov))
pos, prob, state = S.run_mcmc(pos, 100)
# print(np.max(pos))
return pos
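# Usage sketch for the sampler above (illustrative sizes only): lnprob_trunc_norm
# returns -inf whenever sum(x) > 0, so the returned walkers end up concentrated in
# the half-space {x : sum(x) <= 0}.
def _demo_truncated_sampling(n_dim=3, size=50):
    mean = np.zeros(n_dim)
    cov = np.eye(n_dim)
    pos = truncated_normal_sampling(mean, cov, size, n_dim)  # shape (size, n_dim)
    return pos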
def nearestPD(A):
"""Find the nearest positive-definite matrix to input
    A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
    credits [2].
    [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    [2] N. J. Higham, "Computing a nearest symmetric positive semidefinite
    matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
I = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = np.linalg.cholesky(B)
return True
except np.linalg.LinAlgError:
return False
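# Quick usage sketch for the two helpers above (illustrative sizes): start from a
# symmetric but typically indefinite matrix and repair it into a usable covariance.
def _demo_nearestPD(n_dim=4):
    A = np.random.randn(n_dim, n_dim)
    A = (A + A.T) / 2            # symmetric, but usually not positive-definite
    A_pd = A if isPD(A) else nearestPD(A)
    assert isPD(A_pd)
    return A_pd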
def nearest_orthogonal_matrix(A):
'''
Find closest orthogonal matrix to *A* using iterative method.
Bases on the code from REMOVE_SOURCE_LEAKAGE function from OSL Matlab package.
Args:
A (numpy.array): array shaped k, n, where k is number of channels, n - data points
Returns:
L (numpy.array): orthogonalized matrix with amplitudes preserved
Reading:
    Colclough, G. L., et al., A symmetric multivariate leakage correction for MEG connectomes.,
Neuroimage. 2015 Aug 15;117:439-48. doi: 10.1016/j.neuroimage.2015.03.071
'''
#
MAX_ITER = 2000
TOLERANCE = np.max((1, np.max(A.shape) * np.linalg.svd(A.T, False, False)[0])) * np.finfo(A.dtype).eps # TODO
reldiff = lambda a, b: 2 * abs(a - b) / (abs(a) + abs(b))
convergence = lambda rho, prev_rho: reldiff(rho, prev_rho) <= TOLERANCE
A_b = A.conj()
d = np.sqrt(np.sum(A * A_b, axis=1))
rhos = np.zeros(MAX_ITER)
for i in range(MAX_ITER):
scA = A.T * d
u, s, vh = np.linalg.svd(scA, False)
V = np.dot(u, vh)
        # TODO: check if rank is full
d = np.sum(A_b * V.T, axis=1)
L = (V * d).T
E = A - L
rhos[i] = np.sqrt(np.sum(E * E.conj()))
if i > 0 and convergence(rhos[i], rhos[i - 1]):
break
return L
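# Usage sketch (illustrative): orthogonalize the rows of a random square matrix.
# After the correction the rows should be close to mutually orthogonal while keeping
# their original amplitudes, so L @ L.T is expected to be roughly diagonal.
def _demo_nearest_orthogonal(n_dim=5):
    A = np.random.rand(n_dim, n_dim)
    L = nearest_orthogonal_matrix(A)
    return L, L @ L.T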
def generate_normal_drift_data(batch_size, train_size, length, pre_mean_, pre_cov_, ch_mean, ch_cov,
change, n_dim, scale=False, gradual_drift=False, oracle=False, change_X=True,
change_y=True, verbose=False):
"""Generates multivariate normal drifting data"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_cov = pre_cov_.copy()
df = pd.DataFrame()
means = []
covs = []
    disable = not verbose
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, pre_cov = new_distribution(pre_mean, pre_cov, ch_mean, ch_cov,
change_X=change_X, change_y=change_y)
if gradual_drift:
pre_mean, pre_cov = new_similar_distribution(np.zeros(n_dim), pre_cov, [False] * n_dim, ch_cov,
change_X=change_X, change_y=change_y)
if i == 0:
data = rng.multivariate_normal(pre_mean, pre_cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, pre_cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
df = df.append(prov, ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(pre_cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
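# Minimal usage sketch for the generator above (illustrative sizes): the masks below
# drift the mean of every feature and the covariance entries of the last column (the
# column later renamed 'label') every `change` batches.
def _demo_generate_normal_drift(n_dim=4):
    pre_mean = np.zeros(n_dim)
    pre_cov = np.eye(n_dim)
    ch_mean = np.array([True] * n_dim)
    ch_cov = np.array([[False] * n_dim] * n_dim, dtype=bool)
    ch_cov[:, -1] = [True] * (n_dim - 1) + [False]
    return generate_normal_drift_data(batch_size=100, train_size=500, length=10,
                                      pre_mean_=pre_mean, pre_cov_=pre_cov,
                                      ch_mean=ch_mean, ch_cov=ch_cov,
                                      change=5, n_dim=n_dim)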
def generate_normal_drift_data_cholesky(batch_size, train_size, length, pre_mean_, pre_chol_, ch_mean,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data -> no correlation! Do not use!!!"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_chol = pre_chol_.copy()
df = pd.DataFrame()
means = []
covs = []
    disable = not verbose
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, cov = new_distribution_cholesky(pre_mean, ch_mean)
if gradual_drift:
pre_mean, cov = new_similar_distribution_cholesky(pre_mean, pre_chol, ch_mean)
if i == 0:
data = rng.multivariate_normal(pre_mean, cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
df = df.append(prov, ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
def generate_normal_drift_data_svd(batch_size, train_size, length, pre_mean_, pre_eigs_, pre_S_, ch_mean,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_eigs = pre_eigs_.copy()
pre_S = pre_S_.copy()
df = pd.DataFrame()
means = []
covs = []
    disable = not verbose
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, cov = new_distribution_svd(pre_mean, ch_mean)
if gradual_drift:
pre_mean, cov = new_similar_distribution_svd(pre_mean, pre_eigs, pre_S, ch_mean)
if i == 0:
data = rng.multivariate_normal(pre_mean, cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
df = df.append(prov, ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
def generate_normal_localised_drift_data(batch_size, train_size, length, pre_mean, pre_cov, ch_mean, ch_cov,
change, n_dim, scale=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data with drift localised in space with truncated normal sampling
with shifting covariance in the desired part of the space"""
if scale:
scaler = StandardScaler()
df = pd.DataFrame()
means = []
covs = []
pre_mean_2 = pre_mean.copy()
pre_cov_2 = pre_cov.copy()
    disable = not verbose
for i in tqdm(range(length), disable=disable):
if i == 0:
data = np.random.multivariate_normal(pre_mean, pre_cov, size=train_size)
else:
data = np.random.multivariate_normal(pre_mean, pre_cov, size=batch_size)
        # if the point falls in a given region of the plane -> change distribution
data = data[data.sum(axis=1) < 0]
if i == 0:
data2 = truncated_normal_sampling(pre_mean_2, pre_cov_2, train_size - len(data), n_dim)
else:
data2 = truncated_normal_sampling(pre_mean_2, pre_cov_2, batch_size - len(data), n_dim)
data2 = data2.clip(-4, 4) # there are some problems in the sampling from the truncated normal
data = np.concatenate((data, data2))
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
df = df.append(prov, ignore_index=True)
means.append(list(pre_mean_2))
covs.append(pre_cov_2)
if i % change == 0:
pre_mean_2, pre_cov_2 = new_distribution(pre_mean_2, pre_cov_2, ch_mean, ch_cov)
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
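# Usage sketch (illustrative sizes): the localised variant takes the same mean/cov
# masks as above, but only the secondary distribution (sampled with the truncated-normal
# MCMC helper) drifts, so the drift stays confined to part of the feature space.
def _demo_generate_localised_drift(n_dim=4):
    pre_mean = np.zeros(n_dim)
    pre_cov = np.eye(n_dim)
    ch_mean = np.array([True] * n_dim)
    ch_cov = np.array([[False] * n_dim] * n_dim, dtype=bool)
    ch_cov[:, -1] = [True] * (n_dim - 1) + [False]
    return generate_normal_localised_drift_data(batch_size=100, train_size=500,
                                                length=10, pre_mean=pre_mean,
                                                pre_cov=pre_cov, ch_mean=ch_mean,
                                                ch_cov=ch_cov, change=5, n_dim=n_dim)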
def generate_gaussian_copula_drift_data(batch_size, train_size, length, marginals, pre_cov_, ch_cov,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generate data with the desired marginal distributions and a gaussian copula with drifting cov. matrix"""
if scale:
scaler = StandardScaler()
pre_cov = pre_cov_.copy()
df = pd.DataFrame()
covs = []
    disable = not verbose
for i in tqdm(range(length), disable=disable):
if i % change == 0:
_, pre_cov = new_distribution(np.zeros(n_dim), pre_cov, [False]*n_dim, ch_cov)
if gradual_drift:
_, pre_cov = new_similar_distribution(np.zeros(n_dim), pre_cov, [False] * n_dim, ch_cov)
if i == 0:
data = rng.multivariate_normal(np.zeros(n_dim), pre_cov, size=train_size)
else:
data = rng.multivariate_normal(np.zeros(n_dim), pre_cov, size=batch_size)
prov_pre = pd.DataFrame(data)
prov = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return | pd.Series([1.0, 3.0, 2.1], index=times_30) | pandas.Series |
# %% [markdown]
# This notebook is a -modified- VSCode notebook version of:
# https://www.kaggle.com/sheriytm/brewed-tpot-for-nyc-with-love-lb0-37
#
# You can find the train data at:
# https://www.kaggle.com/c/nyc-taxi-trip-duration/data
# You can find the fastest routes data at:
# https://www.kaggle.com/oscarleo/new-york-city-taxi-with-osrm
## All the data files should be in the same directory as this file!
#%%
# Importing necessary libraries
import os
import numpy as np
import pandas as pd
from haversine import haversine
import datetime as dt
#%%
# Loading training data
train = pd.read_csv('train.csv')
#%%
# Long and painful future generation part
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
t0 = dt.datetime.now()
train['pickup_datetime'] = pd.to_datetime(train.pickup_datetime)
train.loc[:, 'pickup_date'] = train['pickup_datetime'].dt.date
train['dropoff_datetime'] = pd.to_datetime(train.dropoff_datetime)
train['store_and_fwd_flag'] = 1 * (train.store_and_fwd_flag.values == 'Y')
train['check_trip_duration'] = (train['dropoff_datetime'] - train['pickup_datetime']).map(lambda x: x.total_seconds())
duration_difference = train[np.abs(train['check_trip_duration'].values - train['trip_duration'].values) > 1]
print('Trip_duration and datetimes are ok.') if len(duration_difference[['pickup_datetime', 'dropoff_datetime', 'trip_duration', 'check_trip_duration']]) == 0 else print('Ooops.')
train['trip_duration'].describe()
train['log_trip_duration'] = np.log(train['trip_duration'].values + 1)
# Feature Extraction
coords = np.vstack((train[['pickup_latitude', 'pickup_longitude']].values,
train[['dropoff_latitude', 'dropoff_longitude']].values))
pca = PCA().fit(coords)
train['pickup_pca0'] = pca.transform(train[['pickup_latitude', 'pickup_longitude']])[:, 0]
train['pickup_pca1'] = pca.transform(train[['pickup_latitude', 'pickup_longitude']])[:, 1]
train['dropoff_pca0'] = pca.transform(train[['dropoff_latitude', 'dropoff_longitude']])[:, 0]
train['dropoff_pca1'] = pca.transform(train[['dropoff_latitude', 'dropoff_longitude']])[:, 1]
# Distance
def haversine_array(lat1, lng1, lat2, lng2):
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
AVG_EARTH_RADIUS = 6371 # in km
lat = lat2 - lat1
lng = lng2 - lng1
d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))
return h
def dummy_manhattan_distance(lat1, lng1, lat2, lng2):
a = haversine_array(lat1, lng1, lat1, lng2)
b = haversine_array(lat1, lng1, lat2, lng1)
return a + b
def bearing_array(lat1, lng1, lat2, lng2):
AVG_EARTH_RADIUS = 6371 # in km
lng_delta_rad = np.radians(lng2 - lng1)
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
y = np.sin(lng_delta_rad) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)
return np.degrees(np.arctan2(y, x))
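#%%
# Quick sanity check of the three distance/bearing helpers above, using two
# illustrative NYC-area coordinate pairs (values are just printed, not asserted).
_lat1, _lng1 = 40.7589, -73.9851
_lat2, _lng2 = 40.6413, -73.7781
print('haversine (km):', haversine_array(_lat1, _lng1, _lat2, _lng2))
print('manhattan approx. (km):', dummy_manhattan_distance(_lat1, _lng1, _lat2, _lng2))
print('bearing (deg):', bearing_array(_lat1, _lng1, _lat2, _lng2))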
train.loc[:, 'distance_haversine'] = haversine_array(train['pickup_latitude'].values, train['pickup_longitude'].values, train['dropoff_latitude'].values, train['dropoff_longitude'].values)
train.loc[:, 'distance_dummy_manhattan'] = dummy_manhattan_distance(train['pickup_latitude'].values, train['pickup_longitude'].values, train['dropoff_latitude'].values, train['dropoff_longitude'].values)
train.loc[:, 'direction'] = bearing_array(train['pickup_latitude'].values, train['pickup_longitude'].values, train['dropoff_latitude'].values, train['dropoff_longitude'].values)
train.loc[:, 'pca_manhattan'] = np.abs(train['dropoff_pca1'] - train['pickup_pca1']) + np.abs(train['dropoff_pca0'] - train['pickup_pca0'])
train.loc[:, 'center_latitude'] = (train['pickup_latitude'].values + train['dropoff_latitude'].values) / 2
train.loc[:, 'center_longitude'] = (train['pickup_longitude'].values + train['dropoff_longitude'].values) / 2
# Datetime features
train.loc[:, 'pickup_weekday'] = train['pickup_datetime'].dt.weekday
train.loc[:, 'pickup_hour_weekofyear'] = train['pickup_datetime'].dt.weekofyear
train.loc[:, 'pickup_hour'] = train['pickup_datetime'].dt.hour
train.loc[:, 'pickup_minute'] = train['pickup_datetime'].dt.minute
train.loc[:, 'pickup_dt'] = (train['pickup_datetime'] - train['pickup_datetime'].min()).dt.total_seconds()
train.loc[:, 'pickup_week_hour'] = train['pickup_weekday'] * 24 + train['pickup_hour']
train.loc[:,'week_delta'] = train['pickup_datetime'].dt.weekday + \
((train['pickup_datetime'].dt.hour + (train['pickup_datetime'].dt.minute / 60.0)) / 24.0)
# Make time features cyclic
train.loc[:,'week_delta_sin'] = np.sin((train['week_delta'] / 7) * np.pi)**2
train.loc[:,'hour_sin'] = np.sin((train['pickup_hour'] / 24) * np.pi)**2
# Speed
train.loc[:, 'avg_speed_h'] = 1000 * train['distance_haversine'] / train['trip_duration']
train.loc[:, 'avg_speed_m'] = 1000 * train['distance_dummy_manhattan'] / train['trip_duration']
train.loc[:, 'pickup_lat_bin'] = np.round(train['pickup_latitude'], 3)
train.loc[:, 'pickup_long_bin'] = np.round(train['pickup_longitude'], 3)
# Average speed for regions
gby_cols = ['pickup_lat_bin', 'pickup_long_bin']
coord_speed = train.groupby(gby_cols).mean()[['avg_speed_h']].reset_index()
coord_count = train.groupby(gby_cols).count()[['id']].reset_index()
coord_stats = pd.merge(coord_speed, coord_count, on=gby_cols)
coord_stats = coord_stats[coord_stats['id'] > 100]
train.loc[:, 'pickup_lat_bin'] = np.round(train['pickup_latitude'], 2)
train.loc[:, 'pickup_long_bin'] = np.round(train['pickup_longitude'], 2)
train.loc[:, 'center_lat_bin'] = np.round(train['center_latitude'], 2)
train.loc[:, 'center_long_bin'] = np.round(train['center_longitude'], 2)
train.loc[:, 'pickup_dt_bin'] = (train['pickup_dt'] // (3 * 3600))
# Clustering
sample_ind = np.random.permutation(len(coords))[:500000]
kmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(coords[sample_ind])
train.loc[:, 'pickup_cluster'] = kmeans.predict(train[['pickup_latitude', 'pickup_longitude']])
train.loc[:, 'dropoff_cluster'] = kmeans.predict(train[['dropoff_latitude', 'dropoff_longitude']])
t1 = dt.datetime.now()
print('Time till clustering: %i seconds' % (t1 - t0).seconds)
# Temporal and geospatial aggregation
for gby_col in ['pickup_hour', 'pickup_date', 'pickup_dt_bin',
'pickup_week_hour', 'pickup_cluster', 'dropoff_cluster']:
gby = train.groupby(gby_col).mean()[['avg_speed_h', 'avg_speed_m', 'log_trip_duration']]
gby.columns = ['%s_gby_%s' % (col, gby_col) for col in gby.columns]
train = pd.merge(train, gby, how='left', left_on=gby_col, right_index=True)
for gby_cols in [['center_lat_bin', 'center_long_bin'],
['pickup_hour', 'center_lat_bin', 'center_long_bin'],
['pickup_hour', 'pickup_cluster'], ['pickup_hour', 'dropoff_cluster'],
['pickup_cluster', 'dropoff_cluster']]:
coord_speed = train.groupby(gby_cols).mean()[['avg_speed_h']].reset_index()
coord_count = train.groupby(gby_cols).count()[['id']].reset_index()
coord_stats = | pd.merge(coord_speed, coord_count, on=gby_cols) | pandas.merge |
import argparse
from itertools import product
from experiment import *
import pandas as pd
from params_helpers import *
# Search parameters for ILP formulation
def search_ilp(insdir, out, lp1, up1, lp2, up2):
try:
os.mkdir(out)
except OSError:
print("Creation of the directory failed or directory already exists")
for instance_name in os.listdir(insdir):
instance_name = instance_name[:-4]
print("-------------------File ", instance_name, "is processed------------------")
try:
os.mkdir(f"{out}/{instance_name}")
except OSError:
print("Creation of the directory failed")
ins = Instance_ilp(instance_name, insdir)
earliest, latest = ins.earliest, ins.latest
C = ins.C
# Find optimal route by brute force
opt_route, opt_cost = brute_force_tsptw(C, latest, earliest)
# Convert route to array
x = route_to_array_ilp(opt_route, ins)
index = 0
# Parameter search
for p1, p2 in product(np.linspace(lp1, up1, num=(up1 - lp1 + 1)), np.linspace(lp2, up2, num=(up2 - lp2 + 1))):
# Get ising formulation
h, J, of = ising_ilp(ins, p1, p2)
# Run simulated annealing
sampleset = anneal(h, J, BETA_RANGE, NUM_READS, NUM_SWEEPS, BETA_SCHEDULE_TYPE)
# Evaluate each sample
Results = evaluate_sampleset_ilp(ins, sampleset)
# Prepare pandas dataframe for the results
Results.append((False, False, 100, 0, 0, 0, 0))
data = pd.DataFrame(Results)
data.columns = ['valid', 'windows', 'cost', 'energy', 'A', 'B', 'E']
data['A'] = p1
data['B'] = 1
data['E'] = p2
# Energy of the optimal route
energy = dimod.ising_energy(binary_to_spin(x), h, J)
data.loc[len(data)] = [True, True, opt_cost, energy, -1, 0, 0]
# Store the results
data.to_pickle(f"{out}/{instance_name}/{instance_name}_{index}")
index += 1
# Search parameters for edge-based formulation
def search_edge(insdir, out, lp1, up1, lp2, up2):
for instance_name in os.listdir(insdir):
instance_name = instance_name[:-4]
print("-------------------File ", instance_name, "is processed------------------")
try:
os.mkdir(f"{out}/{instance_name}")
except OSError:
print(f"Creation of the directory {out}/{instance_name} failed")
quit()
ins = Instance_edge(instance_name, insdir)
C = ins.C
earliest, latest = ins.earliest, ins.latest
# Find optimal route by brute force
opt_route, opt_cost = brute_force_tsptw(C, latest, earliest)
x = route_to_array_edge(opt_route, ins)
index = 0
# Parameter search
for p1, p2 in product(np.linspace(lp1, up1, num=(up1 - lp1 + 1)), np.linspace(lp2, up2, num=(up2 - lp2 + 1))):
# Get ising formulation
h, J, of = ising_edge(ins, p1, p2)
# Run simulated annealing
sampleset = anneal(h, J, BETA_RANGE, NUM_READS, NUM_SWEEPS, BETA_SCHEDULE_TYPE)
# Evaluate each sample
Results = evaluate_sampleset_edge(ins, sampleset)
# Prepare pandas dataframe for the results
Results.append((False, False, 100, 0, 0, 0, 0))
data = | pd.DataFrame(Results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
        # orient='index', but with tuple values
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# Packages are imported.
import pandas as pd
import requests as req
import numpy as np
import datetime as dt
import time
import multiprocessing as mp
import os
import random
import sys
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import statsmodels.stats.multitest as statsmodels
import pickle
np.random.seed(1)
sys.setrecursionlimit(25000)
# Function that requests metric data from Prometheus. Change the IP to the external IP of one of the VMs collecting data.
def request_data(job, startTime, endTime):
print('Job request: ' + str(job) + ' started.')
#print(startTime)
#print(job)
#print(endTime)
request = req.get('http://35.246.188.180:30000/api/v1/query_range?query={job=~"' + job + '"}&start=' + startTime + 'Z&end=' + endTime + 'Z&step=1s')
#print(eval(request.content))
metric_data = pd.DataFrame(eval(request.content))
return metric_data
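# Illustrative usage sketch (not from the original script): timestamps are ISO-8601
# without the trailing 'Z', which request_data appends itself, e.g.
#   df_raw = request_data('istiod', '2021-05-01T11:00:00', '2021-05-01T11:14:59')
# The returned DataFrame wraps the raw Prometheus JSON; the per-metric series sit
# under df_raw['data'][0] and are unpacked later in structureData.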
# Change the IP to the external IP of one of the VMs collecting data.
def get_jobs():
# Cluster: Get list of jobs
x = req.get('http://172.16.58.3:30000/api/v1/label/job/values')
jobList = eval(x.content)['data']
return jobList
# Get the timestamps for the next 15 minutes.
def get_timestamps(date):
scrape_timeframes = []
interval = dt.timedelta(minutes=14, seconds=59)
scrape_timeframes.append([str(date).replace(" ", "T"), str(date + interval).replace(" ", "T"), pd.date_range(date, (date + interval), freq='1S')])
return scrape_timeframes
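# Illustrative example of the returned structure (assuming date = dt.datetime(2021, 5, 1, 11, 0)):
#   frame = get_timestamps(date)[0]
#   frame[0] -> '2021-05-01T11:00:00'   # window start, formatted for the Prometheus API
#   frame[1] -> '2021-05-01T11:14:59'   # window end (14 min 59 s later)
#   len(frame[2]) -> 900                # 1-second DatetimeIndex used as the Timestamp column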
# Feature info is saved to separate files.
def save_feature_info(job, dfList_info):
dfFeatures_info = pd.concat(dfList_info, axis=1)
dfFeatures_info.columns = [str(column) + dfFeatures_info.columns[column] for column in range(0, len(dfFeatures_info.columns))]
list_features_info = [dirs for dirs in os.listdir('data_scraped/feature_info/') if job in dirs]
file_number = len(list_features_info) + 1
dfFeatures_info.to_feather('data_scraped/feature_info/features_' + job + str(file_number) + '.ftr')
# Function to load in data and structure it into one dataframe.
def structureData(job, date):
scrape_timeframes = get_timestamps(date)
features_dict = {'Timestamp' : scrape_timeframes[0][2]}
dfMetric = request_data(job, scrape_timeframes[0][0], scrape_timeframes[0][1])
dfFeatures = pd.DataFrame(features_dict)
print(job + ' metrics: ' + str(len(dfMetric['data'][0])))
dfList = [pd.DataFrame(metric['values'], columns=['Timestamp', '#'.join(list(metric['metric'].values()))]) for metric in dfMetric['data'][0]]
dfList_info = [pd.DataFrame(metric['metric'].keys(), columns=['#'.join(list(metric['metric'].keys()))]) for metric in dfMetric['data'][0]]
dfList.insert(0, dfFeatures)
for df in dfList:
if len(df.columns) > 1:
df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='s')
else:
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
dfList = [df.set_index('Timestamp', drop=True) for df in dfList]
save_feature_info(job, dfList_info)
dfFeatures = pd.concat(dfList, axis=1)
print('Done: ' + job)
return dfFeatures
# Function that saves the data as feather files.
def save_data(job, date):
    tic = time.perf_counter()  # time.clock() was removed in Python 3.8; perf_counter() is its replacement
df = structureData(job, date)
file_number = len(os.listdir('data_scraped/' + job + '/')) + 1
df.reset_index(inplace=True)
df.to_feather('data_scraped/' + job + '/' + job + str(file_number - 1) + '.ftr')
    toc = time.perf_counter()
print(job + ': ' + str(toc-tic))
return df
# For each job, data is requested in 15-minute timeframes, structured, and then saved.
job_list = ['envoy-stats', 'istiod', 'kube-state-metrics', 'kubernetes-apiservers', 'kubernetes-cadvisor', 'kubernetes-nodes', 'kubernetes-pods', 'kubernetes-service-endpoints', 'litmuschaos', 'skydive']
def scrape_data(job_list):
dfDates = pd.date_range('11:00', '15:45', freq='15min')
interval = dt.timedelta(minutes=15, seconds=1)
now = dt.datetime.now()
for date in dfDates:
time_passed = False
while time_passed is False:
if now > (date + interval):
time_passed = True
for job in job_list:
df = save_data(job, date)
time.sleep(5)
else:
print('Too early')
time.sleep(60)
now = dt.datetime.now()
# The function to collect data is run.
scrape_data(job_list)
# The collected data is filtered. Litmus-related and constant features are removed, along with features containing too many missing values.
# Litmus-related features are removed.
def filter_chaos(dataset):
filter_keywords_list = ['litmus', 'litmuschaos', 'chaos', 'chaos-runner', 'hog', 'iostress', 'helper']
column_list = []
iteration_list = []
final_feature_list = []
checked_shape = False
dataframes_list = []
df = pd.read_feather(dataset)
for column in df.columns:
if any(keyword in column for keyword in filter_keywords_list) == False:
iteration_list.append(column)
return iteration_list
# Function to convert datatypes to numeric.
def convert_datatype(column_data):
return column_data[0][column_data[1]].apply(pd.to_numeric, errors='coerce')
# Parallelization function.
def modifyParallelized(function, df):
column_list = [[df, column] for column in df.columns]
# All the available cores are used.
cores=mp.cpu_count()
# Create the multiprocessing pool of cores.
pool = mp.Pool(cores)
columns_converted = pool.map(function, column_list)
# Close down the pool and join.
pool.close()
pool.join()
#pool.clear()
return columns_converted
# Scraped data for jobs are read, combined, filtered, and saved to pickle files.
job_list = ['envoy-stats', 'istiod', 'kube-state-metrics', 'kubernetes-apiservers', 'kubernetes-cadvisor', 'kubernetes-nodes', 'kubernetes-pods', 'kubernetes-service-endpoints', 'litmuschaos', 'skydive']
filter_keywords_list = ['litmus', 'litmuschaos', 'chaos', 'chaos-runner', 'hog', 'iostress', 'helper']
for job in job_list:
if os.path.isfile('data_combined_filtered/' + job + '/' + job + '_filtered.pkl') == True:
continue
column_list = []
iteration_list = []
final_feature_list = []
dataframes_list = []
# Paths for each file are generated.
dataset_dirs = ['data_scraped/' + job + '/' + file for file in os.listdir('data_scraped/' + job) if job in file]
dataset_dirs.sort()
print('Start Job: ' + job)
df = pd.read_feather(dataset_dirs[0])
print(df.shape)
    # Any non-litmus job is filtered on litmus-related features.
if job != 'litmuschaos':
column_list = filter_chaos(dataset_dirs[0])
df = df[column_list]
    # Features (columns) containing more than 5 NAs are dropped.
df.dropna(axis=1, inplace=True, thresh=(len(df) - 5))
print(df.shape)
column_list = [column for column in df.columns if "Timestamp" not in column]
df[column_list] = pd.concat(modifyParallelized(convert_datatype, df[column_list]), axis=1)
print(df.shape)
df.dropna(axis=1, inplace=True, thresh=(len(df) - 5))
column_list = df.columns
df.set_index('Timestamp', drop=True, inplace=True)
# All datasets are merged into one dataset.
for dataset in dataset_dirs[1:]:
print(dataset)
df_concat = pd.read_feather(dataset).set_index('Timestamp', drop=True)
concat_columns = list(set(column_list).intersection(df_concat.columns))
df_concat = pd.concat(modifyParallelized(convert_datatype, df_concat[concat_columns]), axis=1)
df = pd.concat([df, df_concat[concat_columns]], axis=0)
time.sleep(2)
print(df.shape)
# For litmuschaos, only features showing which experiment is running are kept.
if job == 'litmuschaos':
df.dropna(axis=0, inplace=True)
df = df[[column for column in df.columns if 'awaited_experiments' in column]]
# Final filters are executed.
else:
column_list = filter_chaos(dataset_dirs[0])
df.reset_index(drop=False, inplace=True)
df.dropna(axis=1, inplace=True, thresh=(len(df) - 5))
df.dropna(axis=0, inplace=True)
df = df.loc[:, (df != df.iloc[0]).any()]
print(df.shape)
df.to_pickle('data_combined_filtered/' + job + '/' + job + '_filtered.pkl')
# Feature summary of all jobs.
for job in job_list:
df = pd.read_pickle('data_combined_filtered/' + job + '/' + job + '_filtered.pkl')
print(job)
print(df.shape)
# Metrics are generated for each dataset based on a 45-second sliding window and saved.
def generate_metrics(job):
    df = pd.read_pickle('data_combined_filtered/' + job + '/' + job + '_filtered.pkl').set_index('Timestamp', drop=True).sort_values(by='Timestamp')
    # Each aggregation below is computed over a 45-second rolling window and written to its own pickle,
    # with the metric name prefixed to every column (same outputs as the original repeated blocks).
    aggregations = {
        'mean': lambda r: r.mean(),
        'quantile05': lambda r: r.quantile(0.05),
        'quantile25': lambda r: r.quantile(0.25),
        'quantile50': lambda r: r.quantile(0.50),
        'quantile75': lambda r: r.quantile(0.75),
        'quantile95': lambda r: r.quantile(0.95),
        'variance': lambda r: r.var(),
        'skewness': lambda r: r.skew(),
        'minimum': lambda r: r.min(),
        'maximum': lambda r: r.max(),
        'kurtosis': lambda r: r.kurt(),
    }
    for name, aggregate in aggregations.items():
        dfMetric = aggregate(df.rolling('45s'))
        dfMetric.columns = [name + '_' + column for column in dfMetric.columns]
        dfMetric.to_pickle('metrics/' + job + '/' + '_' + name + '_' + job + '.pkl')
    return job + ' Done'
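# Illustrative follow-up (file layout assumed from generate_metrics above):
#   df_mean = pd.read_pickle('metrics/istiod/_mean_istiod.pkl')
# would load the rolling means for the 'istiod' job, with columns prefixed 'mean_'
# and a Timestamp index.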
def generateParallelized(function, job_list):
# All the available cores are used.
cores=mp.cpu_count()
# Create the multiprocessing pool of cores.
pool = mp.Pool(cores)
columns_converted = pool.map(function, job_list)
# Close down the pool and join.
pool.close()
pool.join()
#pool.clear()
return columns_converted
# Metrics are generated for each dataset.
generateParallelized(generate_metrics, [job for job in job_list if job != 'litmuschaos'])
# Rated features are also saved.
df = pd.DataFrame()
for job in [job for job in job_list if job != 'litmuschaos']:
df_new = | pd.read_pickle('data_combined_filtered/' + job + '/' + job + '_filtered.pkl') | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 11:37:16 2019
@author: Lieke
"""
import numpy as np
from numpy import linalg as LA
import pandas as pd
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedKFold
from scipy.stats import ttest_ind
from .utils import TreeNode
@ignore_warnings(category=ConvergenceWarning)
def train_tree(data, labels, tree, classifier = 'svm_occ', dimred = True, useRE = True, FN = 1):
'''
Train the hierarchical classifier.
Parameters
----------
data: training data (cells x genes)
labels: labels of the training data
tree: classification tree (build for the training data using newick.py)
classifier: which classifier to use ('svm' or 'svm_occ')
dimred: if dimensionality reduction should be applied
    useRE: whether cells can be rejected using the reconstruction error
    FN: percentage of false negatives allowed (used as the rejection-threshold percentile)
Return
------
tree: trained classification tree
'''
numgenes = np.shape(data)[1]
if numgenes > 100:
num_components = 100
else:
num_components = 0.9
if(useRE == True):
## First determine the threshold
perc = 100-(FN)
sss = StratifiedKFold(n_splits = 5, shuffle = True, random_state = 0)
sss.get_n_splits(data, labels)
RE = []
for trainindex, testindex in sss.split(data, labels):
train = data.iloc[trainindex]
test = data.iloc[testindex]
pca = PCA(n_components = num_components, random_state = 0)
pca.fit(train)
test_t = pca.transform(test)
test_rec = pca.inverse_transform(test_t)
RE_error2 = LA.norm(test - test_rec, axis = 1)
RE.append(np.percentile(RE_error2,perc))
pca = PCA(n_components = num_components, random_state = 0)
pca.fit(data)
tree[0].set_pca(pca, None) #Save PCA transformation to the root node, so we can apply it to a test set
tree[0].set_RE(np.median(RE))
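        # Illustrative rejection rule implied by the threshold above (a sketch, not from
        # this file): at prediction time a cell x would be rejected when its PCA
        # reconstruction error exceeds the stored median threshold, i.e. when
        #   np.linalg.norm(x - pca.inverse_transform(pca.transform([x]))) > np.median(RE)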
if(dimred == True):
if(useRE == False):
pca = PCA(n_components = num_components, random_state = 0)
pca.fit(data)
tree[0].set_pca(pca, None) #Save PCA transformation to the root node, so we can apply it to a test set
tree[0].set_dimred(True)
data = pca.transform(data)
data = | pd.DataFrame(data) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Script is used to determine any potential sites that may be uploading erroneous measurements. Sites may have 'outlier' values because (running list):
# - They may be using a unit_concept_id that does not have a corresponding 'conversion' in '[unit_mapping.csv](https://github.com/all-of-us/curation/blob/develop/data_steward/resource_files/unit_mapping.csv)'.
from google.cloud import bigquery
# %reload_ext google.cloud.bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# +
from notebooks import parameters
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import six
import scipy.stats
import pandas as pd
# -
measurement_ancestors = [
# lipids
40782589,
40795800,
40772572
# #cbc
# 40789356, 40789120, 40789179, 40772748,
# 40782735, 40789182, 40786033, 40779159
# #cbc w diff
# 40785788, 40785796, 40779195, 40795733,
# 40795725, 40772531, 40779190, 40785793,
# 40779191, 40782561, 40789266
#cmp
# 3049187, 3053283, 40775801, 40779224,
# 40782562, 40782579, 40785850, 40785861,
# 40785869, 40789180, 40789190, 40789527,
# 40791227, 40792413, 40792440, 40795730,
# 40795740, 40795754
#physical measurement
# 40654163,
# 40655804,
# 40654162,
# 40655805,
# 40654167,
# 40654164
]
DATASET = parameters.LATEST_DATASET
print("""
DATASET TO USE: {}
""".format(DATASET))
def find_descendants(DATASET, ancestor_concept):
"""
Function is used to find the descendants of a particular ancestor concept ID using
    BigQuery.
This function then creates a long string of said 'descendant' concepts so it can
be used in future queries.
Parameters
----------
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
ancestor_concept (integer): integer that is the 'ancestor_concept_id' for a particular
set of labs
Returns
-------
string_desc_concepts(string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set
"""
descendant_concepts = """
SELECT
DISTINCT
m.measurement_concept_id
FROM
`{}.unioned_ehr_measurement` m
LEFT JOIN
`{}.concept_ancestor` ca
ON
m.measurement_concept_id = ca.descendant_concept_id
WHERE
ca.ancestor_concept_id IN ({})
GROUP BY 1""".format(DATASET, DATASET, ancestor_concept)
print(descendant_concepts)
desc_concepts_df = pd.io.gbq.read_gbq(descendant_concepts,
dialect='standard')
print('success!')
descendant_concept_ids = desc_concepts_df['measurement_concept_id'].tolist()
string_desc_concepts = "("
num_descs = len(descendant_concept_ids)
for idx, concept_id in enumerate(descendant_concept_ids):
string_desc_concepts += str(concept_id)
if idx < num_descs - 1:
string_desc_concepts += ", "
else:
string_desc_concepts += ")"
return string_desc_concepts
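# Illustrative usage (ancestor ID taken from the lipids group above):
#   desc_ids = find_descendants(DATASET, 40782589)
# desc_ids is a string of the form "(id_1, id_2, ..., id_n)" that can be dropped
# directly into an IN (...) clause, e.g. by find_total_number_of_units_for_lab_type.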
def find_total_number_of_units_for_lab_type(DATASET, string_desc_concepts):
"""
Function is used to find the total number of records that have a unit_concept_id
for the 'cluster' of measurement concept IDs that represent a particular lab
type. The unit_concept_id must be:
a. non-null
b. not 0
Parameters
----------
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
string_desc_concepts(string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set
Returns
-------
    tot_units (int): represents the total number of records for the particular
measurement set that have a unit_concept ID
"""
total_unit_concept_names = """
SELECT SUM(a.count) as tot_concepts
FROM
(SELECT
DISTINCT
c.concept_name as unit_name, c.standard_concept, COUNT(*) as count
FROM
`{}.unioned_ehr_measurement` m
LEFT JOIN
`{}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
m.measurement_concept_id IN {}
AND
m.unit_concept_id IS NOT NULL
AND
m.unit_concept_id <> 0
GROUP BY 1, 2
ORDER BY count DESC) a
""".format(DATASET, DATASET, string_desc_concepts)
tot_units_df = pd.io.gbq.read_gbq(total_unit_concept_names,
dialect='standard')
tot_units = tot_units_df['tot_concepts'].iloc[0]
return tot_units
def find_most_popular_unit_type(tot_units, DATASET, string_desc_concepts):
"""
Function is used to find the most popular unit type for the 'cluster'
of measurement concept IDs that represent a particular measurement set.
Parameters
----------
    tot_units (int): represents the total number of records for the particular
measurement set that have a unit_concept ID
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
string_desc_concepts(string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set.
Returns
-------
most_pop_unit (string): string that represents the most popular unit concept
name for the particular measurement set.
"""
units_for_lab = """
SELECT
DISTINCT
c.concept_name as unit_name, c.standard_concept, COUNT(*) as count, ROUND(COUNT(*) / {} * 100, 2) as percentage_units
FROM
`{}.unioned_ehr_measurement` m
LEFT JOIN
`{}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
m.measurement_concept_id IN {}
AND
m.unit_concept_id IS NOT NULL
AND
m.unit_concept_id <> 0
GROUP BY 1, 2
ORDER BY count DESC
""".format(tot_units, DATASET, DATASET, string_desc_concepts)
units_for_lab_df = pd.io.gbq.read_gbq(units_for_lab, dialect='standard')
    unit_names = units_for_lab_df['unit_name'].tolist()
    most_pop_unit = unit_names[0]
return most_pop_unit
def metrics_for_whole_dataset(DATASET, most_pop_unit, string_desc_concepts,
ancestor_concept):
"""
Function is used to determine select metrics for the whole dataset for all
of the measurement concept IDs that represent a particular measurement set.
Parameters
----------
DATASET (string): string representing the dataset to be queried. Taken from the
parameters file
most_pop_unit (string): string that represents the most popular unit concept
name for the particular measurement set.
string_desc_concepts (string): string of all the descendant concept IDs that
represent the concept_ids for the particular measurement set.
ancestor_concept (int): used as the 'starting' point for all of the measurements.
can hopefully capture multiple descendants that reflect the same type of
measurement.
Returns
-------
median (float): number that represents the 'median' of all the measurements
for the measurement set that have the most popular unit concept
tot_stdev (float): number that represents the 'standard deviation' of all the
measurements for the measurement set that have the most popular unit concept
tot_records (float): number of records (across all sites) that are being measured
for the particular ancestor_concept_id, unit_concept_id, etc.
mean (float): number that represents the 'mean' of all the measurements for the
        measurement set that have the most popular unit
decile1 (float): number that represents the 10th percentile of all the measurements
for the measurement set that have the most popular unit concept
quartile1 (float): number that represents the 25th percentile of all the measurements
for the measurement set that have the most popular unit concept
quartile3 (float): number that represents the 75th percentile of all the measurements
for the measurement set that have the most popular unit concept
decile9 (float): number that represents the 90th percentile of all the measurements
for the measurement set that have the most popular unit concept
concept_name (string): string representing the concept name (cluster of measurements)
that is being investigated
"""
find_range_overall = """
SELECT
m.value_as_number
FROM
`{}.unioned_ehr_measurement` m
JOIN
`{}.concept` c
ON
m.unit_concept_id = c.concept_id
WHERE
c.concept_name like '%{}%'
AND
m.measurement_concept_id IN {}
AND
m.value_as_number IS NOT NULL
AND
m.value_as_number <> 9999999 -- issue with one site that heavily skews data
AND
m.value_as_number <> 0.0 -- not something we expect; appears for a site
ORDER BY
m.value_as_number ASC
""".format(DATASET, DATASET, most_pop_unit, string_desc_concepts)
measurements_for_lab_and_unit = pd.io.gbq.read_gbq(find_range_overall,
dialect='standard')
values = measurements_for_lab_and_unit['value_as_number'].tolist()
find_ancestor_lab = """
SELECT
DISTINCT
c.concept_name
FROM
`{}.concept` c
WHERE
c.concept_id = {}
""".format(DATASET, ancestor_id)
concept_name = | pd.io.gbq.read_gbq(find_ancestor_lab, dialect='standard') | pandas.io.gbq.read_gbq |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
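# e.g. with three NA strings ['NA', 'NULL', 'nan'], f builds the rows
# 'NA,,', ',NULL,' and ',,nan' -- each NA token sits on the diagonal, so the
# whole parsed frame is expected to come back as NaN.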
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import pandas as pd
from statsmodels.tsa.api import VAR
def time_series(data, future_forcast, location):
#[[people, violations, time, location],[people, violations, time, location],[people, violations, time, location]]
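# e.g. (illustrative values only):
# data = [[12, 3, '2021-06-01 10:00:00', 'gate_a'],
# [8, 1, '2021-06-01 11:00:00', 'gate_a'], ...]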
columns = ["people", "violations", "time", "location"]
df = pd.DataFrame(data=data, columns=columns)
df = df[df["location"]==location]
df['time'] = | pd.to_datetime(df['time']) | pandas.to_datetime |
import pandas as pd
import numpy as np
index = ['Mory', 'Ann']
columns = ['Windy', 'Sunny', 'Snowy', 'Thundery', 'Soild', 'Lighting']
data = {
'Mory': [2.0, 4.0, 6.0, 7.0, 6.0, 5.0],
'Ann': [1.0, 5.0, 1.0, 1.0, 1.0, 1.0],
}
df = pd.DataFrame(index=index, columns=columns, dtype=np.float64)
for (k, v) in data.items():
df.loc[k] = v  # fill each person's row directly (safer than assigning through df.T)
print(df)
######## demo2
data = {
'Name': ['Mory', 'Ann', 'Jenny'],
'Dream': ['Become a leader', 'Maybe world will enlightened', 'Everyone in happiness'],
'Level': [2.0, 5.0, 2.5]
}
df_surpass = pd.DataFrame(data=data, index=[1, 2, 3])
ann = df_surpass.iloc[1]
mory = df_surpass.iloc[0]
df_surpass.loc[4] = 'Demon', 'Know myself', 3.5  # values follow the column order: Name, Dream, Level
print(df_surpass)
df_surpass.sort_values(by='Level', ascending=False)
surpass_type = pd.Series(
data=['light', 'demon', 'snow', np.nan],
index=[2, 1, 3, 4]
)
df_surpass['SType'] = surpass_type
print(df_surpass)
df_surpass['SType'].fillna('ordinary', inplace=True)
print(df_surpass)
df_surpass['Level'] = df_surpass['Level'].map(lambda x: min(5, x+1))
print(df_surpass)
# demo dummy variable
data = pd.DataFrame(columns=['weekday'])
data.weekday = [i for i in range(1, 8)] * 3
data['score'] = 1.0
# perform dummy
dummy_data = | pd.get_dummies(data.weekday, prefix='weekday') | pandas.get_dummies |
# -*- coding: utf-8 -*-
import os
import datetime
import pandas as pd
from toolz import merge
from argcheck import expect_types
from WindAdapter.factor_loader import FactorLoader
from WindAdapter.utils import save_data_to_file
from WindAdapter.utils import print_table
from WindAdapter.utils import handle_wind_query_exception
from WindAdapter.custom_logger import CustomLogger
from WindAdapter.data_provider import WindDataProvider
from WindAdapter.helper import WindQueryHelper
from WindAdapter.enums import OutputFormat
LOGGER = CustomLogger()
WIND_DATA_PRODIVER = WindDataProvider()
WIND_QUERY_HELPER = WindQueryHelper()
def reset_log_level(log_level):
"""
:param log_level: enum, one of 'info', 'critical', 'notset'
:return: sets the verbosity of WindAdapter's log output; the project default is the 'info' level
"""
LOGGER.critical('Reset log level to {0}'.format(log_level))
LOGGER.set_level(log_level)
def reset_data_dict_path(path, path_type_abs):
"""
:param path: str, custom path of the data_dict file
:param path_type_abs: bool, True: the path is absolute, False: the path is relative
:return: the path of the data_dict is updated
"""
LOGGER.critical('Reset path of data dict to {0}'.format(path))
os.environ['DATA_DICT_PATH'] = path
os.environ['DATA_DICT_PATH_TYPE_ABS'] = str(path_type_abs)
return
@handle_wind_query_exception(LOGGER)
def get_universe(index_id, date=None, output_weight=False):
"""
:param index_id: str, an index code, or 'fullA' / 'ashare' (meaning all listed A-share stocks); case-insensitive
:param date: str, optional, YYYYMMDD/YYYY-MM-DD; defaults to None, i.e. the constituents of the latest trading day are returned
:param output_weight: bool, optional, whether to also return the weight of each constituent stock
:return: if output_weight=False, returns a list of constituent stocks
if output_weight=True, returns a DataFrame
"""
LOGGER.info('Loading the constituent stocks of index {0} at date {1}'.
format(index_id, datetime.date.today() if date is None else date))
ret = WindDataProvider.get_universe(index_id, date, output_weight)
LOGGER.info('Number of the loaded constituent stocks is {0}'.format(len(ret)))
return ret
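# Usage sketch (the index code and date below are only illustrative, not taken from this project):
# members = get_universe('000300.SH', date='2017-06-30') # list of constituent codes
# weights = get_universe('000300.SH', date='2017-06-30', output_weight=True) # DataFrame incl. weights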
@handle_wind_query_exception(LOGGER)
def get_live(sec_id, block_size=400):
"""
:param sec_id: list, Wind security codes; for the whole market, 'fulla' or 'ashare' can be passed
:param block_size: number of securities fetched per internal wsq call, 400 by default
:return: pd.DataFrame, index=sec id, header = [rt_open,rt_high,rt_low,rt_last,rt_vol,rt_amt,rt_vol_ratio,rt_pct_chg_5min]
"""
factor = FactorLoader(start_date=None,
end_date=None,
factor_name='LIVE',
sec_id=sec_id,
block_size=block_size)
ret = factor.load_data()
return ret
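# Usage sketch (security codes are illustrative):
# snapshot = get_live(['600000.SH', '000001.SZ'])
# snapshot = get_live('fulla', block_size=400) # whole A-share market, fetched 400 codes per wsq call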
@handle_wind_query_exception(LOGGER)
@expect_types(factor_name=(str, list))
def factor_load(start_date, end_date, factor_name, save_file=None, **kwargs):
"""
:param start_date: str, start date of the factor data to load
:param end_date: str, end date of the factor data to load
:param factor_name: str, factor name, case-insensitive
:param save_file: str, optional, file name for saving the data, e.g. '*.csv' or '*.pkl'
:param kwargs: dict, optional
freq: str, optional, frequency of the factor data, one of 'M', 'W', 'S', 'Y', see enums.py - FreqType
tenor: str, optional, tenor of the factor data; cross-sectional measures (e.g. turnover, return) need a backward-looking window, given as number + FreqType, e.g. '3M'
sec_id, str/list, optional, security codes or an index code
output_data_format: enum, optional, see enums.py - OutputFormat
MULTI_INDEX_DF: multi-index DataFrame, index=[date, secID], value = factor
PIVOT_TABLE_DF: DataFrame, index=date, columns = secID
is_index: bool, optional, True: the given sec_id is an index and the factor data of its constituent stocks is loaded,
False: the factor data of sec_id itself is loaded
date_format: str, optional, date format, default '%Y-%m-%d'
:return: pd.DataFrame, the assembled factor data
"""
if isinstance(factor_name, list):
kwargs = merge(kwargs, {'output_data_format': OutputFormat.MULTI_INDEX_DF})
factor_names = factor_name
else:
factor_names = [factor_name]
ret = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
import numpy as np
import pickle
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from sklearn.linear_model import LassoCV
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
def feature_selection(gene_set, n_data_matrix, type):
"""
The FEATURE_SELECTION operation executes the feature selection procedure per gene set and per data matrix, according to the specified type of selection: it takes as input the name of the gene set to consider and the number of the model to build (i.e., the number of the data matrix to consider), and performs the specified feature selection for all the genes of interest in the selected set. Results are exported locally as Excel or text files.
:param gene_set: the set of genes of interest to analyze
:param n_data_matrix: number identifying the data matrix to analyze (only 2,3 and 5 values are permitted)
:param type: the type of feature selection to perform (possible values are {'ffs_default','ffs_no_reval','all','lasso','lasso_all'})
Example::
import genereg as gr
gr.FeatureSelection.feature_selection(gene_set='DNA_REPAIR', n_data_matrix=2, type=ffs_default)
gr.FeatureSelection.feature_selection(gene_set='DNA_REPAIR', n_data_matrix=3, type=ffs_default)
gr.FeatureSelection.feature_selection(gene_set='DNA_REPAIR', n_data_matrix=5, type=ffs_default)
"""
# Check input parameters
if n_data_matrix not in [2, 3, 5]:
raise ValueError('Data Matrix ERROR! Possible values: {2,3,5}')
# Define the model to create
model = str(n_data_matrix)
# Import the list of genes of interest and extract in a list the Gene Symbols of all the genes belonging to the current gene set
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
SYMs_current_pathway = []
for index, row in EntrezConversion_df.iterrows():
sym = row['GENE_SYMBOL']
path = row['GENE_SET']
if path == gene_set:
SYMs_current_pathway.append(sym)
if (type == 'ffs_default') or (type == 'ffs_no_reval') or (type == 'all'):
# Create a dataframe to store results of feature selection for each gene
if ((type == 'ffs_default') or (type == 'ffs_no_reval')) and (model == '2'):
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Initial N° Features','Discarded Features','N° Features Selected'])
elif (type == 'all'):
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Initial N° Features','Discarded Features','N° Features Selected'])
else:
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Initial N° Features','Discarded Features','Features Available for Selection','N° Features Selected'])
for current_gene in SYMs_current_pathway:
# Import the model corresponding to the current gene
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == current_gene, 'ENTREZ_GENE_ID'].iloc[0]
model_gene_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+model+'.xlsx',sheetname='Sheet1',header=0)
tot_n_features = len(model_gene_df.columns) - 1 # all the columns, except for the expression of the model gene (i.e. target)
# Count the number of columns that contains all NaN values and that will be discarded before the regression
n_discarded_features = 0
for col, val in model_gene_df.iteritems():
s = model_gene_df[col]
if s.isnull().values.all():
n_discarded_features = n_discarded_features + 1
# Remove NaN columns
model_gene_df.dropna(axis=1, how='all', inplace=True)
# Store the first results in the summary dataframe
summary_results_df.set_value(current_gene, 'TOT Initial N° Features', tot_n_features)
summary_results_df.set_value(current_gene, 'Discarded Features', n_discarded_features)
if (type == 'ffs_default') and ((model == '3') or (model == '5')):
# Load the list of features selected for the previous model
if model == '3':
previous_model = str(int(model)-1)
elif model == '5':
previous_model = str(int(model)-2)
text_file = open('./5_Data_Analysis/'+gene_set+'/FeatureSelection/M'+previous_model+'/Features-Gene_'+gene_ID+'_['+current_gene+'].txt', 'r')
prev_features = text_file.read().split('\n')
prev_features.remove('')
text_file.close()
# Extract the features of the previous model that have not been selected by the feature selection and that we can remove from the current model before performing regression
previous_model_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+previous_model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+previous_model+'.xlsx',sheetname='Sheet1',header=0)
previous_col_names_to_delete = []
for name, values in previous_model_df.iteritems():
if ('EXPRESSION' not in name):
if name not in prev_features:
previous_col_names_to_delete.append(name)
# Update the model keeping only the set of features we can select from
current_model_col_names = set(list(model_gene_df.columns.values))
previous_model_col_names = set(list(previous_model_df.columns.values))
# if no new columns were added to the current matrix with respect to the previous one and no features were selected for the previous matrix, use again the whole matrix for the feature selection
if (current_model_col_names.issubset(previous_model_col_names)) & (len(prev_features) == 0):
model_gene_df = model_gene_df.copy()
else:
model_gene_df.drop(list(current_model_col_names & set(previous_col_names_to_delete)), axis=1, inplace=True)
summary_results_df.set_value(current_gene, 'Features Available for Selection', (len(model_gene_df.columns)-1))
elif (type == 'ffs_no_reval') and ((model == '3') or (model == '5')):
# Load the list of features selected for the previous model
if model == '3':
previous_model = str(int(model)-1)
elif model == '5':
previous_model = str(int(model)-2)
text_file = open('./5_Data_Analysis/'+gene_set+'/FeatureSelection/M'+previous_model+'/Features-Gene_'+gene_ID+'_['+current_gene+'].txt', 'r')
prev_features = text_file.read().split('\n')
prev_features.remove('')
text_file.close()
# Remove from the current model all the features belonging to the previous model
previous_model_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+previous_model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+previous_model+'.xlsx',sheetname='Sheet1',header=0)
current_model_col_names = set(list(model_gene_df.columns.values))
previous_model_col_names = set(list(previous_model_df.columns.values))
current_columns = list(model_gene_df.columns.values)
previous_columns = list(previous_model_df.columns.values)
# if no new columns were added to the current matrix with respect to the previous one, use the whole matrix for the feature selection
if (current_model_col_names.issubset(previous_model_col_names)):
model_gene_df = model_gene_df.copy()
else:
for f in current_columns:
if (f != 'EXPRESSION ('+current_gene+')'):
if f in previous_columns:
model_gene_df.drop(f, axis=1, inplace=True)
summary_results_df.set_value(current_gene, 'Features Available for Selection', (len(model_gene_df.columns)-1))
# Import the dictionary containing the information needed to split the dataframe in five test sets
dict_test_split = pickle.load(open('./5_Data_Analysis/dict_test_split.p', 'rb'))
# Split the dataframe into five dataframes that will be used as test sets
model_gene_df_test1 = model_gene_df.loc[dict_test_split['Test_1']]
model_gene_df_test2 = model_gene_df.loc[dict_test_split['Test_2']]
model_gene_df_test3 = model_gene_df.loc[dict_test_split['Test_3']]
model_gene_df_test4 = model_gene_df.loc[dict_test_split['Test_4']]
model_gene_df_test5 = model_gene_df.loc[dict_test_split['Test_5']]
# Define the corresponding five dataframes to be used as training sets
model_gene_df_train1 = model_gene_df[~model_gene_df.index.isin(model_gene_df_test1.index)]
model_gene_df_train2 = model_gene_df[~model_gene_df.index.isin(model_gene_df_test2.index)]
model_gene_df_train3 = model_gene_df[~model_gene_df.index.isin(model_gene_df_test3.index)]
model_gene_df_train4 = model_gene_df[~model_gene_df.index.isin(model_gene_df_test4.index)]
model_gene_df_train5 = model_gene_df[~model_gene_df.index.isin(model_gene_df_test5.index)]
# Now, execute the feature selection five times, each time considering one of the five dataframes as test set.
# CASE 1 ---------------------------------------------------------------------------------------------------------
# Define the parameters:
case = 1
model_gene_df_train = model_gene_df_train1.copy()
model_gene_df_test = model_gene_df_test1.copy()
# Define the features (predictors X) and the target (label y), together with training and testing sets:
# features X: model gene methylation and other genes expression values
# target y: model gene expression value
X_train = np.array(model_gene_df_train.drop(['EXPRESSION ('+current_gene+')'],1))
X_test = np.array(model_gene_df_test.drop(['EXPRESSION ('+current_gene+')'],1))
y_train = np.array(model_gene_df_train['EXPRESSION ('+current_gene+')'])
y_test = np.array(model_gene_df_test['EXPRESSION ('+current_gene+')'])
# Reshape y
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# APPLY FEATURE SELECTION
# Sequential Feature Selector performs forward feature selection.
# The function used is the following:
# SFS(estimator, k_features, forward, floating, scoring, cv, n_jobs, ...)
# where estimator = scikit-learn classifier or regressor
# k_features = number of features to select (int or tuple with a min and max value).
# SFS will consider return any feature combination between min and max
# that scored highest in cross-validation.
# If 'best' is provided, the feature selector will return the feature subset with the best
# cross-validation performance. If 'parsimonious' is provided as an argument,
# the smallest feature subset that is within one standard error of the cross-validation
# performance will be selected.
# forward = forward selection if true, backward selection otherwise
# floating = allows to implement SFFS or SBFS
# scoring = scoring metric
# {accuracy, f1, precision, recall, roc_auc} for classifiers
# {'mean_absolute_error', 'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors
# cv = cross-validation generator (default: 5)
# n_jobs = number of CPUs to use for evaluating different feature subsets in parallel ('-1' means 'all CPUs')
# Define the linear regression object
lr = LinearRegression()
# Count the total number of features
tot_N_features = int(X_train.shape[1])
# Perform feature selection
sfs = SFS(lr, k_features='best', forward=True, floating=False, scoring='neg_mean_squared_error', cv=5)
# Learn model from training data
sfs = sfs.fit(X_train, y_train)
# Get all the details of the forward fits:
# 'get_metric_dict(confidence_interval=0.95)' returns a dictionary, where dictionary keys are the number
# of iterations (number of feature subsets) and where the value for each key is a second dictionary.
# The keys of this second dictionary are:
# 'feature_idx': tuple of the indices of the feature subset
# 'cv_scores': list with individual CV scores
# 'avg_score': average of CV scores
# 'std_dev': standard deviation of the CV score average
# 'std_err': standard error of the CV score average
# 'ci_bound': confidence interval bound of the CV score average (around the computed cross-validation scores)
# and they each have a different value in each iteration.
# So, the general struture of this dictionary is the following:
# {Iteration_1 : {feature_idx: tuple_of_values, cv_scores: list_of_values, avg_score: value,...},
# Iteration_2 : {feature_idx: tuple_of_values, cv_scores: list_of_values, avg_score: value,...},
# Iteration_3 : {feature_idx: tuple_of_values, cv_scores: list_of_values, avg_score: value,...}, ...}
result_dict = sfs.get_metric_dict()
# Compute the mean of cross-validation scores
mean_cv_scores = []
for i in np.arange(1,tot_N_features+1): # values are generated within the interval [start, stop), including start but excluding stop
# since cv_scores are negative numbers in the previous dictionary, I have to add a '-' to compute the mean
mean_cv_scores.append(-np.mean(result_dict[i]['cv_scores']))
# Get the number of features selected, i.e. the subset size at which the mean
# cross-validation score reaches its minimum
idx_1 = np.argmin(mean_cv_scores)+1
# Get the features indexes for the best forward fit and convert them to list
feature_idx_1 = result_dict[idx_1]['feature_idx']
selected_features_indexes = list(feature_idx_1)
# Extract the names of these features
X_df = model_gene_df.drop(['EXPRESSION ('+current_gene+')'],1)
X_df_columns = list(X_df.columns)
columns_selected_1 = []
for index in selected_features_indexes:
columns_selected_1.append(X_df_columns[index])
# FIT THE NEW MODEL
# Define the new training and test set, according to the features selected
X_train_sfs = X_train[:, feature_idx_1]
X_test_sfs = X_test[:, feature_idx_1]
# Define the linear regression object and train the model using training sets
lr.fit(X_train_sfs, y_train)
# Make predictions
y_predicted = lr.predict(X_test_sfs)
# Set the expected results
y_expected = y_test
# Compute the values of R-squared
train_R2_1 = lr.score(X_train_sfs, y_train)
test_R2_1 = lr.score(X_test_sfs, y_test)
# CASE 2 ---------------------------------------------------------------------------------------------------------
# Define the parameters:
case = 2
model_gene_df_train = model_gene_df_train2.copy()
model_gene_df_test = model_gene_df_test2.copy()
# Define the features (predictors X) and the target (label y), together with training and testing sets:
X_train = np.array(model_gene_df_train.drop(['EXPRESSION ('+current_gene+')'],1))
X_test = np.array(model_gene_df_test.drop(['EXPRESSION ('+current_gene+')'],1))
y_train = np.array(model_gene_df_train['EXPRESSION ('+current_gene+')'])
y_test = np.array(model_gene_df_test['EXPRESSION ('+current_gene+')'])
# Reshape y
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# APPLY FEATURE SELECTION
lr = LinearRegression()
tot_N_features = int(X_train.shape[1])
sfs = SFS(lr, k_features='best', forward=True, floating=False, scoring='neg_mean_squared_error', cv=5)
sfs = sfs.fit(X_train, y_train)
# Get all the details of the forward fits:
result_dict = sfs.get_metric_dict()
# Compute the mean of cross-validation scores
mean_cv_scores = []
for i in np.arange(1,tot_N_features+1):
mean_cv_scores.append(-np.mean(result_dict[i]['cv_scores']))
# Get the number of features selected
idx_2 = np.argmin(mean_cv_scores)+1
# Get the features indexes for the best forward fit and convert them to list
feature_idx_2 = result_dict[idx_2]['feature_idx']
selected_features_indexes = list(feature_idx_2)
# Extract the names of these features
X_df = model_gene_df.drop(['EXPRESSION ('+current_gene+')'],1)
X_df_columns = list(X_df.columns)
columns_selected_2 = []
for index in selected_features_indexes:
columns_selected_2.append(X_df_columns[index])
# FIT THE NEW MODEL
X_train_sfs = X_train[:, feature_idx_2]
X_test_sfs = X_test[:, feature_idx_2]
lr.fit(X_train_sfs, y_train)
y_predicted = lr.predict(X_test_sfs)
y_expected = y_test
# Compute the values of R-squared
train_R2_2 = lr.score(X_train_sfs, y_train)
test_R2_2 = lr.score(X_test_sfs, y_test)
# CASE 3 ---------------------------------------------------------------------------------------------------------
# Define the parameters:
case = 3
model_gene_df_train = model_gene_df_train3.copy()
model_gene_df_test = model_gene_df_test3.copy()
# Define the features (predictors X) and the target (label y), together with training and testing sets:
X_train = np.array(model_gene_df_train.drop(['EXPRESSION ('+current_gene+')'],1))
X_test = np.array(model_gene_df_test.drop(['EXPRESSION ('+current_gene+')'],1))
y_train = np.array(model_gene_df_train['EXPRESSION ('+current_gene+')'])
y_test = np.array(model_gene_df_test['EXPRESSION ('+current_gene+')'])
# Reshape y
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# APPLY FEATURE SELECTION
lr = LinearRegression()
tot_N_features = int(X_train.shape[1])
sfs = SFS(lr, k_features='best', forward=True, floating=False, scoring='neg_mean_squared_error', cv=5)
sfs = sfs.fit(X_train, y_train)
# Get all the details of the forward fits:
result_dict = sfs.get_metric_dict()
# Compute the mean of cross-validation scores
mean_cv_scores = []
for i in np.arange(1,tot_N_features+1):
mean_cv_scores.append(-np.mean(result_dict[i]['cv_scores']))
# Get the number of features selected
idx_3 = np.argmin(mean_cv_scores)+1
# Get the features indexes for the best forward fit and convert them to list
feature_idx_3 = result_dict[idx_3]['feature_idx']
selected_features_indexes = list(feature_idx_3)
# Extract the names of these features
X_df = model_gene_df.drop(['EXPRESSION ('+current_gene+')'],1)
X_df_columns = list(X_df.columns)
columns_selected_3 = []
for index in selected_features_indexes:
columns_selected_3.append(X_df_columns[index])
# FIT THE NEW MODEL
X_train_sfs = X_train[:, feature_idx_3]
X_test_sfs = X_test[:, feature_idx_3]
lr.fit(X_train_sfs, y_train)
y_predicted = lr.predict(X_test_sfs)
y_expected = y_test
# Compute the values of R-squared
train_R2_3 = lr.score(X_train_sfs, y_train)
test_R2_3 = lr.score(X_test_sfs, y_test)
# CASE 4 ---------------------------------------------------------------------------------------------------------
# Define the parameters:
case = 4
model_gene_df_train = model_gene_df_train4.copy()
model_gene_df_test = model_gene_df_test4.copy()
# Define the features (predictors X) and the target (label y), together with training and testing sets:
X_train = np.array(model_gene_df_train.drop(['EXPRESSION ('+current_gene+')'],1))
X_test = np.array(model_gene_df_test.drop(['EXPRESSION ('+current_gene+')'],1))
y_train = np.array(model_gene_df_train['EXPRESSION ('+current_gene+')'])
y_test = np.array(model_gene_df_test['EXPRESSION ('+current_gene+')'])
# Reshape y
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# APPLY FEATURE SELECTION
lr = LinearRegression()
tot_N_features = int(X_train.shape[1])
sfs = SFS(lr, k_features='best', forward=True, floating=False, scoring='neg_mean_squared_error', cv=5)
sfs = sfs.fit(X_train, y_train)
# Get all the details of the forward fits:
result_dict = sfs.get_metric_dict()
# Compute the mean of cross-validation scores
mean_cv_scores = []
for i in np.arange(1,tot_N_features+1):
mean_cv_scores.append(-np.mean(result_dict[i]['cv_scores']))
# Get the number of features selected
idx_4 = np.argmin(mean_cv_scores)+1
# Get the features indexes for the best forward fit and convert them to list
feature_idx_4 = result_dict[idx_4]['feature_idx']
selected_features_indexes = list(feature_idx_4)
# Extract the names of these features
X_df = model_gene_df.drop(['EXPRESSION ('+current_gene+')'],1)
X_df_columns = list(X_df.columns)
columns_selected_4 = []
for index in selected_features_indexes:
columns_selected_4.append(X_df_columns[index])
# FIT THE NEW MODEL
X_train_sfs = X_train[:, feature_idx_4]
X_test_sfs = X_test[:, feature_idx_4]
lr.fit(X_train_sfs, y_train)
y_predicted = lr.predict(X_test_sfs)
y_expected = y_test
# Compute the values of R-squared
train_R2_4 = lr.score(X_train_sfs, y_train)
test_R2_4 = lr.score(X_test_sfs, y_test)
# CASE 5 ---------------------------------------------------------------------------------------------------------
# Define the parameters:
case = 5
model_gene_df_train = model_gene_df_train5.copy()
model_gene_df_test = model_gene_df_test5.copy()
# Define the features (predictors X) and the target (label y), together with training and testing sets:
X_train = np.array(model_gene_df_train.drop(['EXPRESSION ('+current_gene+')'],1))
X_test = np.array(model_gene_df_test.drop(['EXPRESSION ('+current_gene+')'],1))
y_train = np.array(model_gene_df_train['EXPRESSION ('+current_gene+')'])
y_test = np.array(model_gene_df_test['EXPRESSION ('+current_gene+')'])
# Reshape y
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# APPLY FEATURE SELECTION
lr = LinearRegression()
tot_N_features = int(X_train.shape[1])
sfs = SFS(lr, k_features='best', forward=True, floating=False, scoring='neg_mean_squared_error', cv=5)
sfs = sfs.fit(X_train, y_train)
# Get all the details of the forward fits:
result_dict = sfs.get_metric_dict()
# Compute the mean of cross-validation scores
mean_cv_scores = []
for i in np.arange(1,tot_N_features+1):
mean_cv_scores.append(-np.mean(result_dict[i]['cv_scores']))
# Get the number of features selected
idx_5 = np.argmin(mean_cv_scores)+1
# Get the features indexes for the best forward fit and convert them to list
feature_idx_5 = result_dict[idx_5]['feature_idx']
selected_features_indexes = list(feature_idx_5)
# Extract the names of these features
X_df = model_gene_df.drop(['EXPRESSION ('+current_gene+')'],1)
X_df_columns = list(X_df.columns)
columns_selected_5 = []
for index in selected_features_indexes:
columns_selected_5.append(X_df_columns[index])
# FIT THE NEW MODEL
X_train_sfs = X_train[:, feature_idx_5]
X_test_sfs = X_test[:, feature_idx_5]
lr.fit(X_train_sfs, y_train)
y_predicted = lr.predict(X_test_sfs)
y_expected = y_test
# Compute the values of R-squared
train_R2_5 = lr.score(X_train_sfs, y_train)
test_R2_5 = lr.score(X_test_sfs, y_test)
# Compute the mean of the five training and test R2 scores
train_R2 = (train_R2_1+train_R2_2+train_R2_3+train_R2_4+train_R2_5)/5
test_R2 = (test_R2_1+test_R2_2+test_R2_3+test_R2_4+test_R2_5)/5
# Take the names of the features selected in the five cases, create their intersection
features_intersection = list(set(columns_selected_1) & set(columns_selected_2) & set(columns_selected_3) & set(columns_selected_4) & set(columns_selected_5))
# Define the final set of selected features and finally export the list of columns selected in a .txt file
if (type == 'ffs_no_reval') and ((model == '3') or (model == '5')):
final_features = list(set(features_intersection) | set(prev_features))
else:
final_features = features_intersection
with open ('./5_Data_Analysis/'+gene_set+'/FeatureSelection/M'+model+'/Features-Gene_'+gene_ID+'_['+current_gene+'].txt', 'w') as fp:
final_features_sorted = sorted(final_features)
for i in final_features_sorted:
fp.write('%s\n' % i)
summary_results_df.set_value(current_gene, 'N° Features Selected', len(final_features))
# Export the summary dataframe in an Excel file
writer = ExcelWriter('./5_Data_Analysis/'+gene_set+'/FeatureSelection/M'+model+'/Feature_Selection_SUMMARY.xlsx')
summary_results_df.to_excel(writer,'Sheet1')
writer.save()
elif (type == 'lasso') or (type == 'lasso_all'):
# Create a dataframe to store results of feature selection for each gene
if (type == 'lasso') and (model == '2'):
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Initial N° Features','Discarded Features','N° Features Selected'])
elif (type == 'lasso_all'):
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Initial N° Features','Discarded Features','N° Features Selected'])
else:
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Initial N° Features','Discarded Features','Features Available for Selection','N° Features Selected'])
for current_gene in SYMs_current_pathway:
# Import the model corresponding to the current gene
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == current_gene, 'ENTREZ_GENE_ID'].iloc[0]
model_gene_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+model+'.xlsx',sheetname='Sheet1',header=0)
tot_n_features = len(model_gene_df.columns) - 1 # all the columns, except for the expression of the model gene (i.e. target)
# Count the number of columns that contains all NaN values and that will be discarded before the regression
n_discarded_features = 0
for col, val in model_gene_df.iteritems():
s = model_gene_df[col]
if s.isnull().values.all():
n_discarded_features = n_discarded_features + 1
# Remove NaN columns
model_gene_df.dropna(axis=1, how='all', inplace=True)
# Store the first results in the summary dataframe
summary_results_df.set_value(current_gene, 'TOT Initial N° Features', tot_n_features)
summary_results_df.set_value(current_gene, 'Discarded Features', n_discarded_features)
if (type == 'lasso') and ((model == '3') or (model == '5')):
# Load the list of features selected for the previous model
if model == '3':
previous_model = str(int(model)-1)
elif model == '5':
previous_model = str(int(model)-2)
text_file = open('./5_Data_Analysis/'+gene_set+'/FeatureSelection/M'+previous_model+'/Features-Gene_'+gene_ID+'_['+current_gene+'].txt', 'r')
prev_features = text_file.read().split('\n')
prev_features.remove('')
text_file.close()
# Extract the features of the previous model that have not been selected by the feature selection and that we can remove from the current model before performing regression
previous_model_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+previous_model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+previous_model+'.xlsx',sheetname='Sheet1',header=0)
previous_col_names_to_delete = []
for name, values in previous_model_df.iteritems():
if ('EXPRESSION' not in name):
if name not in prev_features:
previous_col_names_to_delete.append(name)
# Update the model keeping only the set of features we can select from
current_model_col_names = set(list(model_gene_df.columns.values))
previous_model_col_names = set(list(previous_model_df.columns.values))
# if no new columns were added to the current matrix with respect to the previous one and no features were selected for the previous matrix, use again the whole matrix for the feature selection
if (current_model_col_names.issubset(previous_model_col_names)) & (len(prev_features) == 0):
model_gene_df = model_gene_df.copy()
else:
model_gene_df.drop(list(current_model_col_names & set(previous_col_names_to_delete)), axis=1, inplace=True)
summary_results_df.set_value(current_gene, 'Features Available for Selection', (len(model_gene_df.columns)-1))
# Set all the remaining unknown values (NaN) to zero
model_gene_df = model_gene_df.fillna(0)
# DATA STANDARDIZATION:
# Normalize expression values (using the proper scaler from the sklearn library):
# MinMaxScaler() normalizes values between 0 and 1
# StandardScaler() performs Z-score normalization
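# (Z-score: each feature is rescaled as z = (x - mean(x)) / std(x), giving zero mean and unit variance)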
scaler = preprocessing.StandardScaler() # define the scaler
# Define the dataframe to normalize
to_normalize = model_gene_df.copy()
matrix = to_normalize.values # convert into a numpy array
# Normalize and convert back to pandas dataframe
matrix_scaled = scaler.fit_transform(matrix)
model_gene_df = | pd.DataFrame(matrix_scaled, index=to_normalize.index, columns=to_normalize.columns) | pandas.DataFrame |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
pytestmark = pytest.mark.single
def test_format_type(setup_path):
df = DataFrame({"A": [1, 2]})
with ensure_clean_path(setup_path) as path:
with HDFStore(path) as store:
store.put("a", df, format="fixed")
store.put("b", df, format="table")
assert store.get_storer("a").format_type == "fixed"
assert store.get_storer("b").format_type == "table"
def test_format_kwarg_in_constructor(setup_path):
# GH 13291
msg = "format is not a defined argument for HDFStore"
with tm.ensure_clean(setup_path) as path:
with pytest.raises(ValueError, match=msg):
HDFStore(path, format="table")
def test_api_default_format(setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
| pd.set_option("io.hdf.default_format", None) | pandas.set_option |
# encoding: utf-8
import re
import collections
import operator
import random
import numpy as np
from PIL import Image
from pathlib import Path
import csv
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import jieba
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from palettable.colorbrewer.sequential import *
import pdfkit
# local python lib
import wechat_const
from ibot_utils import *
class BotAnalyze(object):
"""
Bot Analyse object::
from ibot_chat_analyse import *
ba = BotAnalyze(bot, bot_group)
ba.start_analysis_tasks()
"""
def __init__(self, bot_db, bot, bot_group):
# read configuration
self.debug = False
self.fl_days = get_first_last_days()
# init file handler
self.path_analyse = get_path_custom('analyse')
self.bot_db = bot_db
self.bot = bot
self.bot_group = bot_group
try:
self.group_id = bot_group.ext_attr.group_id
self.group_name = bot_group.ext_attr.group_name
except AttributeError:
self.group_id = 1
pass
def load_chat_history(self, group_id, date_begin, date_end):
return self.bot_db.select("SELECT `id`,`msg_type`,`wx_puid`,`sender_name`,`msg`,`create_time` "
"FROM wx_chat_history WHERE `group_id` = %s"
" AND `create_time` >= %s AND `create_time` <= %s ",
(group_id, date_begin, date_end))
@staticmethod
def format_message(msg):
msg = msg.replace('\n', ' ')
if msg.find('昵称未设置或不符合标准') >= 0:
msg = ''
if msg.find('下次将无警告直接踢出群') >= 0:
msg = ''
return msg
@staticmethod
# r'(@([\u4e00-\u9fa5]|[ -~]|[\s\S])%s)|(@([\u4e00-\u9fa5]|[ -~]|[\s\S]))'
def filter_message(msg):
# get the @nickname; if it is located at the end of the message there is no trailing space char
try:
nickname = re.search(r'@(.+?)%s|@(.+?)+' % wechat_const.space_after_chat_at, msg).group(0)
msg = msg.replace(nickname, '')
except AttributeError:
pass
# TODO: not so pretty here, should filter using the group's nickname list
msg = msg.replace('@野生小新|IoT|全栈构架', ' ')
msg = msg.replace('哈哈', '')
return msg
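# Intended effect (sketch): the '@nickname' mention (assuming wechat_const.space_after_chat_at is the
# special space WeChat inserts after a mention) and filler tokens such as '哈哈' are stripped, so only
# the meaningful words reach the jieba segmentation step.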
def save_chat_in_current_month(self, group_id):
results = self.load_chat_history(group_id, self.fl_days[0], self.fl_days[1])
path_csv_file = os.path.join(self.path_analyse,
'%s_chat_%s_%s.csv' % (self.group_id, self.fl_days[0], self.fl_days[1]))
with open(path_csv_file, mode='w', encoding='utf-8') as csv_file:
fieldnames = ['id', 'create_time', 'msg_type', 'wx_puid', 'sender_name', 'msg']
csv_writer = csv.writer(csv_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(fieldnames)
for row in results:
row_id = row[0]
msg_type = row[1]
wx_puid = row[2]
sender_name = row[3]
msg = row[4]
create_time = row[5]
msg = self.format_message(msg)
csv_writer.writerow([row_id, create_time, msg_type, wx_puid, sender_name, msg])
csv_file.close()
return path_csv_file
@staticmethod
def load_stopwords():
filepath = os.path.join('./assets', r'stopwords_cn.txt')
stopwords = [line.strip() for line in open(filepath, encoding='utf-8').readlines()]
# print(stopwords) # ok
return stopwords
@staticmethod
def load_stopwords_it(column='all'):
if column == 'all':
filepath_branch = os.path.join('./assets', r'stopwords_it_branch.txt')
stopwords_branch = [line.strip() for line in open(filepath_branch, encoding='utf-8').readlines()]
filepath_language = os.path.join('./assets', r'stopwords_it_language.txt')
stopwords_language = [line.strip() for line in open(filepath_language, encoding='utf-8').readlines()]
stopwords = set()
stopwords.update(stopwords_branch)
stopwords.update(stopwords_language)
else:
filepath = os.path.join('./assets', r'stopwords_it_%s.txt' % column)
stopwords = [line.strip() for line in open(filepath, encoding='utf-8').readlines()]
return stopwords
def gen_wordcloud_chat_history(self, csv_file):
df = pd.read_csv(csv_file, delimiter='\t', encoding='utf-8')
# print(len(df))
# print(df.head())
stopwords = set(STOPWORDS)
stopwords.update(self.load_stopwords())
message = df['msg']
word_count = ""
for msg in message:
if msg is not np.NaN:
msg = self.filter_message(msg)
seg_list = jieba.cut(msg, cut_all=True, HMM=True)
for word in seg_list:
word_count = word_count + word + " "
shape_file = './assets/chat_history_shape.png'
shape = np.array(Image.open(shape_file))
font = r'./assets/heiti.ttf'
word_cloud = WordCloud(
margin=2,
mask=shape,
font_path=font,
scale=1,
# max_words=200,
# min_font_size=4,
# max_font_size=150,
stopwords=stopwords,
random_state=42,
background_color='white',
width=1080,
height=720).generate(word_count)
path_image = os.path.join(self.path_analyse,
'%s_chat_word_cloud_%s_%s.png' % (self.group_id, self.fl_days[0], self.fl_days[1]))
word_cloud.to_file(path_image)
return path_image
@staticmethod
def cal_time_list_chat_freq_day(df):
time_list = {}
# 2019-05-29 09:41:25
create_times = df['create_time']
for create_time in create_times:
dt_stamp = mk_datetime(create_time)
hour_in_24 = dt_stamp.hour
if hour_in_24 in time_list:
time_list[hour_in_24] = time_list[hour_in_24] + 1
else:
time_list[hour_in_24] = 1
# fulfill the time list
for i in range(0, 24):
if i not in time_list:
time_list[i] = 0
time_list = collections.OrderedDict(sorted(time_list.items()))
return time_list
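# Returned structure (counts are illustrative):
# OrderedDict([(0, 2), (1, 0), ..., (9, 57), ..., (23, 7)]) -- number of messages per hour of the day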
def gen_bar_plot_msg_type(self, csv_file):
df = pd.read_csv(csv_file, delimiter='\t', encoding='utf-8')
df['msg_type'].value_counts().plot(kind='bar')
plt.subplots_adjust(bottom=0.2)
plt.title('Message Type [%s - %s]' % (self.fl_days[0], self.fl_days[1]))
path_image = os.path.join(self.path_analyse,
'%s_chat_msg_type_bar_%s_%s.png' % (self.group_id, self.fl_days[0], self.fl_days[1]))
plt.savefig(path_image)
plt.close()
return path_image
def gen_bar_plot_chat_freq_day(self, csv_file):
df = pd.read_csv(csv_file, delimiter='\t', encoding='utf-8')
msg_count = len(df)
time_list = self.cal_time_list_chat_freq_day(df)
plt.figure(figsize=(18, 9))
plt.bar(time_list.keys(), time_list.values(), width=.8, facecolor='lightskyblue', edgecolor='white')
plt.xticks(range(len(time_list)), time_list.keys())
for x_axies in time_list:
y_axies = time_list[x_axies]
label = '{}%'.format(round(y_axies*1.0/msg_count*100, 2))
plt.text(x_axies, y_axies+0.05, label, ha='center', va='bottom')
plt.title('Chat frequency in 24 hours [%s - %s]' % (self.fl_days[0], self.fl_days[1]))
path_image = os.path.join(self.path_analyse,
'%s_chat_freq_day_bar_%s_%s.png' % (self.group_id, self.fl_days[0], self.fl_days[1]))
plt.savefig(path_image)
plt.close()
return path_image
def gen_spot_plot_chat_count_day(self, csv_file):
df = pd.read_csv(csv_file, delimiter='\t', encoding='utf-8')
time_list = self.cal_time_list_chat_freq_day(df)
        max_freq = max(time_list.items(), key=operator.itemgetter(1))[1]  # highest hourly count, used for the y-axis range
x = []
y = []
for i in range(0, 24):
x.append(str(i)+':00-'+str(i+1)+':00')
y.append(time_list[i])
x_array = np.array(x)
y_array = np.array(y)
# plt.rcParams['font.sans-serif'] = ['SimHei']
plt.figure(figsize=(16, 9))
plt.subplots_adjust(bottom=0.2)
plt.scatter(x_array, y_array, color="blue", label="times")
plt.xlabel('Time 00:00—24:00')
plt.ylabel('Chat Frequency [%s - %s]' % (self.fl_days[0], self.fl_days[1]))
plt.xticks(range(0, 24), rotation=75, fontsize=10)
plt.yticks(range(0, max_freq + 200, 20))
# plt.legend(loc='lower right')
# plt.show()
path_image = os.path.join(self.path_analyse,
'%s_chat_count_day_spot_%s_%s.png'
% (self.group_id, self.fl_days[0], self.fl_days[1]))
plt.savefig(path_image, format='png')
plt.close()
return path_image
'''
member activity heat map
:param csv_file: csv file dir
'''
def gen_heatmap_member_activity(self, csv_file):
df = pd.read_csv(csv_file, delimiter='\t', encoding='utf-8')
create_times = df['create_time']
week_online = [[0 for j in range(24)] for i in range(7)]
for li in create_times:
week_online[int(mk_datetime(li, "%Y-%m-%d %H:%M:%S").weekday())][int(li[11:13])] += 1
week_online = np.array([li for li in week_online])
columns = [str(i) + '-' + str(i + 1) for i in range(0, 24)]
index = ['Mon.', 'Tue.', 'Wed.', 'Thu.', 'Fri.', 'Sat.', 'Sun.']
week_online = pd.DataFrame(week_online, index=index, columns=columns)
plt.figure(figsize=(18.5, 9))
plt.rcParams['font.sans-serif'] = ['SimHei']
sns.set()
# Draw a heatmap with the numeric values in each cell
sns.heatmap(week_online, annot=True, fmt="d", cmap="YlGnBu")
path_image = os.path.join(self.path_analyse,
'%s_activity_heatmap_%s_%s.png' % (self.group_id, self.fl_days[0], self.fl_days[1]))
plt.savefig(path_image, format='png', dpi=300)
plt.close()
return path_image
@staticmethod
def get_info_from_nickname(nickname):
nickname = str.strip(nickname)
nickname = nickname.replace(' ', ' ')
nickname = nickname.replace('|', '|')
nickname = nickname.replace(' | ', '|')
nickname = nickname.replace('| ', '|')
nickname = nickname.replace(' |', '|')
nickname = nickname.replace('&', '&')
branch = ''
language = ''
reg = r'([\u4e00-\u9fa5]|[ -~]|[\s\S])+\|([\u4e00-\u9fa5]|[ -~])+\|([\u4e00-\u9fa5]|[ -~])+'
if re.match(reg, nickname):
try:
parsed = re.split(r'\|', nickname)
branch = parsed[1]
language = parsed[2]
except AttributeError:
pass
return [branch, language]
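    # Illustrative example (hedged): a nickname such as "张三 | Backend | Java" is first
    # normalised to "张三|Backend|Java" and then split on "|", giving branch = "Backend"
    # and language = "Java", i.e. the return value ["Backend", "Java"].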
@staticmethod
def format_readable_nickname(text):
text = text.replace('&', '&')
return text
@staticmethod
def convert_it_term(text, word_count):
if re.search(r'c\+\+', text, re.IGNORECASE):
word_count = word_count + 'Cpp' + " "
        # assumption: the original pattern here appears garbled; this matches a standalone "c" token
        if re.search(r'(^|\s)c(\s|$)', text, re.IGNORECASE):
word_count = word_count + 'C语言' + " "
return word_count
def save_member_detail_list(self):
# include region, gender, signature
self.bot_group.update_group(members_details=True)
path_csv_file = get_path_custom('group_member') + '/%s_member_detail.csv' % self.group_id
with open(path_csv_file, mode='w', encoding='utf-8') as csv_file:
fieldnames = ['puid', 'nickname', 'gender', 'branch', 'language']
csv_writer = csv.writer(csv_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(fieldnames)
for member in self.bot_group:
nickname = member.name # display_name
wx_puid = member.puid
nickname = self.format_readable_nickname(nickname)
gender = member.sex
result = self.get_info_from_nickname(nickname)
csv_writer.writerow([wx_puid, nickname, gender, result[0], result[1]])
csv_file.close()
return path_csv_file
@staticmethod
def color_func_gray(word, font_size, position, orientation, random_state=None, **kwargs):
return "hsl(0, 0%%, %d%%)" % random.randint(60, 100)
@staticmethod
def color_func_red(word, font_size, position, orientation, random_state=None, **kwargs):
# https://jiffyclub.github.io/palettable/colorbrewer/sequential
return tuple(Reds_9.colors[random.randint(2, 8)])
@staticmethod
def color_func_blue(word, font_size, position, orientation, random_state=None, **kwargs):
return tuple(Blues_9.colors[random.randint(2, 8)])
@staticmethod
def color_func_YlGn_9(word, font_size, position, orientation, random_state=None, **kwargs):
return tuple(YlGn_9.colors[random.randint(2, 8)])
@staticmethod
def color_func_PuBu_9(word, font_size, position, orientation, random_state=None, **kwargs):
return tuple(PuBu_9.colors[random.randint(2, 8)])
'''
wordcloud for branches & languages information of members' nickname
:param csv_file: csv file dir
:param column: column name: branch, language
:param gender: all:all, male:male+unknown, female:female
'''
def gen_wordcloud_info_nicknames(self, csv_file, column='branch', gender='all'):
df = | pd.read_csv(csv_file, delimiter='\t', encoding='utf-8') | pandas.read_csv |
#from ai4good.models.cm.initialise_parameters import params, control_data, categories, calculated_categories, change_in_categories
from ai4good.models.cm.initialise_parameters import Parameters
from math import exp, ceil, log, floor, sqrt
import numpy as np
from scipy.integrate import ode
from scipy.stats import norm, gamma
import pandas as pd
import statistics
import os
import pickle
from tqdm import tqdm
import dask
from dask.diagnostics import ProgressBar
def timing_function(t,time_vector):
for ii in range(ceil(len(time_vector)/2)):
if t>=time_vector[2*ii] and t<time_vector[2*ii+1]:
return True
# if wasn't in any of these time interval
return False
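# Illustrative example (hedged): with time_vector = [0, 10, 20, 30] the "on" windows are
# [0, 10) and [20, 30), so timing_function(5, time_vector) is True while
# timing_function(15, time_vector) is False.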
##
# -----------------------------------------------------------------------------------
##
class Simulator:
def __init__(self, params: Parameters):
self.params = params
def ode_system(self, t, y, # state of system
infection_matrix, age_categories, symptomatic_prob, hospital_prob, critical_prob, beta, # params
latentRate, removalRate, hospRate, deathRateICU, deathRateNoIcu, # more params
better_hygiene, remove_symptomatic, remove_high_risk, ICU_capacity # control
):
##
params = self.params
dydt = np.zeros(y.shape)
I_vec = [y[params.I_ind + i * params.number_compartments] for i in range(age_categories)]
# H_vec = [ y[params.H_ind+i*params.number_compartments] for i in range(age_categories)]
C_vec = [y[params.C_ind + i * params.number_compartments] for i in range(age_categories)]
A_vec = [y[params.A_ind + i * params.number_compartments] for i in range(age_categories)]
total_I = sum(I_vec)
# better hygiene
if timing_function(t, better_hygiene['timing']): # control in place
control_factor = better_hygiene['value']
else:
control_factor = 1
# removing symptomatic individuals
if timing_function(t, remove_symptomatic['timing']): # control in place
remove_symptomatic_rate = min(total_I, remove_symptomatic[
'rate']) # if total_I too small then can't take this many off site at once
else:
remove_symptomatic_rate = 0
S_removal = 0
for i in range(age_categories - remove_high_risk['n_categories_removed'], age_categories):
S_removal += y[params.S_ind + i * params.number_compartments] # add all old people to remove
for i in range(age_categories):
# removing symptomatic individuals
# these are put into Q ('quarantine');
quarantine_sick = remove_symptomatic_rate * y[
params.I_ind + i * params.number_compartments] / total_I # no age bias in who is moved
# removing susceptible high risk individuals
# these are moved into O ('offsite')
if i in range(age_categories - remove_high_risk['n_categories_removed'],
age_categories) and timing_function(t, remove_high_risk['timing']):
remove_high_risk_people = min(remove_high_risk['rate'],
S_removal) # only removing high risk (within time control window). Can't remove more than we have
else:
remove_high_risk_people = 0
# ICU capacity
if sum(C_vec) > 0: # can't divide by 0
ICU_for_this_age = ICU_capacity['value'] * y[params.C_ind + i * params.number_compartments] / sum(
C_vec) # hospital beds allocated on a first come, first served basis
else:
ICU_for_this_age = ICU_capacity['value']
# ODE system:
# S
dydt[params.S_ind + i * params.number_compartments] = (
- y[params.S_ind + i * params.number_compartments] * control_factor * beta * (
np.dot(infection_matrix[i, :], I_vec) + params.AsymptInfectiousFactor * np.dot(
infection_matrix[i, :], A_vec))
- remove_high_risk_people * y[params.S_ind + i * params.number_compartments] / S_removal)
# E
dydt[params.E_ind + i * params.number_compartments] = (
y[params.S_ind + i * params.number_compartments] * control_factor * beta * (
np.dot(infection_matrix[i, :], I_vec) + params.AsymptInfectiousFactor * np.dot(
infection_matrix[i, :], A_vec))
- latentRate * y[params.E_ind + i * params.number_compartments])
# I
dydt[params.I_ind + i * params.number_compartments] = (
latentRate * (1 - symptomatic_prob[i]) * y[params.E_ind + i * params.number_compartments]
- removalRate * y[params.I_ind + i * params.number_compartments]
- quarantine_sick
)
# A
dydt[params.A_ind + i * params.number_compartments] = (
latentRate * symptomatic_prob[i] * y[params.E_ind + i * params.number_compartments]
- removalRate * y[params.A_ind + i * params.number_compartments])
# H
dydt[params.H_ind + i * params.number_compartments] = (
removalRate * (hospital_prob[i]) * y[params.I_ind + i * params.number_compartments]
- hospRate * y[params.H_ind + i * params.number_compartments]
# + deathRateNoIcu * (1 - params.death_prob) * max(0,y[params.C_ind + i*params.number_compartments] - ICU_for_this_age) # recovered despite no ICU (0, since now assume death_prob is 1)
+ deathRateICU * (1 - params.death_prob_with_ICU) * min(
y[params.C_ind + i * params.number_compartments], ICU_for_this_age) # recovered from ICU
+ (hospital_prob[i]) * params.quarant_rate * y[params.Q_ind + i * params.number_compartments]
# proportion of removed people who were hospitalised once returned
)
# Critical care (ICU)
dydt[params.C_ind + i * params.number_compartments] = (
min(hospRate * (critical_prob[i]) * y[params.H_ind + i * params.number_compartments],
max(0,
ICU_for_this_age - y[params.C_ind + i * params.number_compartments]
+ deathRateICU * y[params.C_ind + i * params.number_compartments] # with ICU treatment
)
) # amount entering is minimum of: amount of beds available**/number needing it
# **including those that will be made available by new deaths
- deathRateICU * y[params.C_ind + i * params.number_compartments] # with ICU treatment
)
# Uncared - no ICU
dydt[params.U_ind + i * params.number_compartments] = (hospRate * (critical_prob[i]) * y[
params.H_ind + i * params.number_compartments] # number needing care
- min(
hospRate * (critical_prob[i]) * y[params.H_ind + i * params.number_compartments],
max(0,
ICU_for_this_age - y[params.C_ind + i * params.number_compartments]
+ deathRateICU * y[params.C_ind + i * params.number_compartments]
)) # minus number who get it (these entered category C)
- deathRateNoIcu * y[
params.U_ind + i * params.number_compartments]
# without ICU treatment
)
# R
dydt[params.R_ind + i * params.number_compartments] = (
removalRate * (1 - hospital_prob[i]) * y[params.I_ind + i * params.number_compartments]
+ removalRate * y[params.A_ind + i * params.number_compartments]
+ hospRate * (1 - critical_prob[i]) * y[params.H_ind + i * params.number_compartments]
+ (1 - hospital_prob[i]) * params.quarant_rate * y[
params.Q_ind + i * params.number_compartments]
# proportion of removed people who recovered once returned
)
# D
dydt[params.D_ind + i * params.number_compartments] = (deathRateNoIcu * y[
params.U_ind + i * params.number_compartments] # died without ICU treatment (all cases that don't get treatment die)
+ deathRateICU * (params.death_prob_with_ICU) * y[
params.C_ind + i * params.number_compartments]
# died despite attempted ICU treatment
)
# O
dydt[params.O_ind + i * params.number_compartments] = remove_high_risk_people * y[
params.S_ind + i * params.number_compartments] / S_removal
# Q
dydt[params.Q_ind + i * params.number_compartments] = quarantine_sick - params.quarant_rate * y[
params.Q_ind + i * params.number_compartments]
return dydt
##
#--------------------------------------------------------------------
##
def run_model(self, T_stop, beta, latent_rate=None, removal_rate=None, hosp_rate=None, death_rate_ICU=None, death_rate_no_ICU=None):
population = self.params.population
population_frame = self.params.population_frame
control_dict = self.params.control_dict
if latent_rate is None:
latent_rate = self.params.latent_rate
if removal_rate is None:
removal_rate = self.params.removal_rate
if hosp_rate is None:
hosp_rate = self.params.hosp_rate
if death_rate_ICU is None:
death_rate_ICU = self.params.death_rate_with_ICU
if death_rate_no_ICU is None:
death_rate_no_ICU = self.params.death_rate # more params
E0 = 0 # exposed
I0 = 1/population # sympt
A0 = 1/population # asympt
R0 = 0 # recovered
H0 = 0 # hospitalised/needing hospital care
C0 = 0 # critical (cared)
D0 = 0 # dead
O0 = 0 # offsite
Q0 = 0 # quarantined
U0 = 0 # critical (uncared)
S0 = 1 - I0 - R0 - C0 - H0 - D0 - O0 - Q0 - U0
age_categories = int(population_frame.shape[0])
y0 = np.zeros(self.params.number_compartments*age_categories)
population_vector = np.asarray(population_frame.Population_structure)
# initial conditions
for i in range(age_categories):
y0[self.params.S_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * S0
y0[self.params.E_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * E0
y0[self.params.I_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * I0
y0[self.params.A_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * A0
y0[self.params.R_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * R0
y0[self.params.H_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * H0
y0[self.params.C_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * C0
y0[self.params.D_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * D0
y0[self.params.O_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * O0
y0[self.params.Q_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * Q0
y0[self.params.U_ind + i * self.params.number_compartments] = (population_vector[i] / 100) * U0
symptomatic_prob = np.asarray(population_frame.p_symptomatic)
hospital_prob = np.asarray(population_frame.p_hospitalised)
critical_prob = np.asarray(population_frame.p_critical)
sol = ode(self.ode_system).set_f_params(
self.params.infection_matrix,
age_categories,
symptomatic_prob,
hospital_prob,
critical_prob,
beta, # params
latent_rate,removal_rate,hosp_rate,death_rate_ICU,death_rate_no_ICU, # more params
control_dict['better_hygiene'],control_dict['remove_symptomatic'],control_dict['remove_high_risk'],control_dict['ICU_capacity']
)
tim = np.linspace(0,T_stop, T_stop+1) # 1 time value per day
sol.set_initial_value(y0,tim[0])
y_out = np.zeros((len(y0),len(tim)))
i2 = 0
y_out[:,0] = sol.y
for t in tim[1:]:
if sol.successful():
sol.integrate(t)
i2=i2+1
y_out[:,i2] = sol.y
else:
raise RuntimeError('ode solver unsuccessful')
y_plot = np.zeros((len(self.params.categories.keys()), len(tim) ))
for name in self.params.calculated_categories:
y_plot[self.params.categories[name]['index'],:] = y_out[self.params.categories[name]['index'],:]
for i in range(1, population_frame.shape[0]): # age_categories
y_plot[self.params.categories[name]['index'],:] = y_plot[self.params.categories[name]['index'],:] + y_out[self.params.categories[name]['index'] + i*self.params.number_compartments,:]
for name in self.params.change_in_categories: # daily change in
name_changed_var = name[-1] # name of the variable we want daily change of
y_plot[self.params.categories[name]['index'],:] = np.concatenate([[0],np.diff(y_plot[self.params.categories[name_changed_var]['index'],:])])
# finally,
E = y_plot[self.params.categories['CE']['index'],:]
I = y_plot[self.params.categories['CI']['index'],:]
A = y_plot[self.params.categories['CA']['index'],:]
y_plot[self.params.categories['Ninf']['index'],:] = [E[i] + I[i] + A[i] for i in range(len(E))] # change in total number of people with active infection
return {'y': y_out,'t': tim, 'y_plot': y_plot}
#--------------------------------------------------------------------
def generate_percentiles(self, sols):
n_time_points = len(sols[0]['t'])
y_plot = np.zeros((len(self.params.categories.keys()), len(sols) , n_time_points ))
for k, sol in enumerate(sols):
sol['y'] = np.asarray(sol['y'])
for name in self.params.categories.keys():
y_plot[self.params.categories[name]['index'],k,:] = sol['y_plot'][self.params.categories[name]['index']]
y_L95, y_U95, y_LQ, y_UQ, y_median = [np.zeros((len(self.params.categories.keys()),n_time_points)) for i in range(5)]
for name in self.params.categories.keys():
y_L95[self.params.categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[self.params.categories[name]['index'],:,i],2.5) for i in range(n_time_points) ])
y_LQ[self.params.categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[self.params.categories[name]['index'],:,i],25) for i in range(n_time_points) ])
y_UQ[self.params.categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[self.params.categories[name]['index'],:,i],75) for i in range(n_time_points) ])
y_U95[self.params.categories[name]['index'],:] = np.asarray([ np.percentile(y_plot[self.params.categories[name]['index'],:,i],97.5) for i in range(n_time_points) ])
y_median[self.params.categories[name]['index'],:] = np.asarray([statistics.median(y_plot[self.params.categories[name]['index'],:,i]) for i in range(n_time_points) ])
return [y_U95, y_UQ, y_LQ, y_L95, y_median]
def simulate_range_of_R0s(self, t_stop=200): # gives solution for middle R0, as well as solutions for a range of R0s between an upper and lower bound
beta_list = self.params.im_beta_list
largest_eigenvalue = self.params.largest_eigenvalue
sols = []
sols_raw = {}
for beta in beta_list:
result = self.run_model(T_stop=t_stop, beta=beta)
sols.append(result)
sols_raw[beta*largest_eigenvalue/self.params.removal_rate]=result
[y_U95, y_UQ, y_LQ, y_L95, y_median] = self.generate_percentiles(sols)
standard_sol = [self.run_model(T_stop=t_stop, beta=self.params.beta_list[1])]
return sols_raw, standard_sol, [y_U95, y_UQ, y_LQ, y_L95, y_median]
def simulate_over_parameter_range(self, numberOfIterations, t_stop=200):
sols = []
config_dict = []
sols_raw = {}
for ii in tqdm(range(min(numberOfIterations,len(self.params.generated_disease_vectors)))):
latentRate = 1/self.params.generated_disease_vectors.LatentPeriod[ii]
removalRate = 1/self.params.generated_disease_vectors.RemovalPeriod[ii]
beta = removalRate*self.params.generated_disease_vectors.R0[ii]/self.params.largest_eigenvalue
hospRate = 1/self.params.generated_disease_vectors.HospPeriod[ii]
deathRateICU = 1/self.params.generated_disease_vectors.DeathICUPeriod[ii]
deathRateNoIcu = 1/self.params.generated_disease_vectors.DeathNoICUPeriod[ii]
result = self.run_model(T_stop=t_stop, beta=beta,
latent_rate=latentRate,
removal_rate=removalRate,
hosp_rate=hospRate,
death_rate_ICU=deathRateICU,
death_rate_no_ICU=deathRateNoIcu
)
sols.append(result)
Dict = dict(beta = beta,
latentRate = latentRate,
removalRate = removalRate,
hospRate = hospRate,
deathRateICU = deathRateICU,
deathRateNoIcu = deathRateNoIcu
)
config_dict.append(Dict)
sols_raw[(self.params.generated_disease_vectors.R0[ii],latentRate,removalRate,hospRate,deathRateICU,deathRateNoIcu)]=result
[y_U95, y_UQ, y_LQ, y_L95, y_median] = self.generate_percentiles(sols)
# standard run
StandardSol = [self.run_model(T_stop=t_stop, beta=self.params.beta_list[1])]
return sols_raw, StandardSol, [y_U95, y_UQ, y_LQ, y_L95, y_median], config_dict
def simulate_over_parameter_range_parallel(self, numberOfIterations, t_stop, n_processes):
lazy_sols = []
config_dict = []
sols_raw = {}
for ii in range(min(numberOfIterations,len(self.params.generated_disease_vectors))):
latentRate = 1/self.params.generated_disease_vectors.LatentPeriod[ii]
removalRate = 1/self.params.generated_disease_vectors.RemovalPeriod[ii]
beta = removalRate*self.params.generated_disease_vectors.R0[ii]/self.params.largest_eigenvalue
hospRate = 1/self.params.generated_disease_vectors.HospPeriod[ii]
deathRateICU = 1/self.params.generated_disease_vectors.DeathICUPeriod[ii]
deathRateNoIcu = 1/self.params.generated_disease_vectors.DeathNoICUPeriod[ii]
lazy_result = dask.delayed(self.run_model)(T_stop=t_stop, beta=beta,
latent_rate=latentRate,
removal_rate=removalRate,
hosp_rate=hospRate,
death_rate_ICU=deathRateICU,
death_rate_no_ICU=deathRateNoIcu
)
lazy_sols.append(lazy_result)
#sols.append(result)
Dict = dict(beta = beta,
latentRate = latentRate,
removalRate = removalRate,
hospRate = hospRate,
deathRateICU = deathRateICU,
deathRateNoIcu = deathRateNoIcu
)
config_dict.append(Dict)
#TODO: sols_raw[(self.params.generated_disease_vectors.R0[ii],latentRate,removalRate,hospRate,deathRateICU,deathRateNoIcu)]=result
with dask.config.set(scheduler='processes', n_processes=n_processes):
with ProgressBar():
sols = dask.compute(*lazy_sols)
for ii in range(min(numberOfIterations, len(self.params.generated_disease_vectors))):
dct = config_dict[ii]
sols_raw[(self.params.generated_disease_vectors.R0[ii], dct['latentRate'], dct['removalRate'],
dct['hospRate'], dct['deathRateICU'], dct['deathRateNoIcu'])] = sols[ii]
[y_U95, y_UQ, y_LQ, y_L95, y_median] = self.generate_percentiles(sols)
# standard run
StandardSol = [self.run_model(T_stop=t_stop, beta=self.params.beta_list[1])]
return sols_raw, StandardSol, [y_U95, y_UQ, y_LQ, y_L95, y_median], config_dict
def generate_csv(data_to_save, params: Parameters, input_type=None, time_vec=None) -> pd.DataFrame:
population_frame = params.population_frame
category_map = {}
for key in params.categories.keys():
category_map[str(params.categories[key]['index'])] = key
if input_type=='percentile':
csv_sol = np.transpose(data_to_save)
solution_csv = pd.DataFrame(csv_sol)
col_names = []
for i in range(csv_sol.shape[1]):
col_names.append(params.categories[category_map[str(i)]]['longname'])
solution_csv.columns = col_names
solution_csv['Time'] = time_vec
# this is our dataframe to be saved
elif input_type=='raw':
final_frame=pd.DataFrame()
for key, value in tqdm(data_to_save.items()):
csv_sol = np.transpose(value['y']) # age structured
solution_csv = | pd.DataFrame(csv_sol) | pandas.DataFrame |
"""
Pipeline Evaluation module
This module runs all the pipeline steps and allows you to visualize them.
"""
import datetime
from typing import List, Tuple, Union
import pandas as pd
from sklearn.pipeline import Pipeline
from .evaluation import Evaluator
from .feature_reduction import FeatureReductor
from .labeling import Labeler
from .splitting import Splitter
from .utils import Picklable, visualize_data, visualize_labels
class PipelineEvaluator(Picklable):
"""
PipelineEvaluator contains all modules and triggers them.
"""
def __init__(
self,
labeler: Labeler = None,
splitter: Splitter = None,
pipeline: Pipeline = None,
feature_reductor: FeatureReductor = None,
model=None,
evaluator: Evaluator = None,
dropna: bool = True,
downprojector=None,
visualize: Union[bool, List[str]] = False,
verbose: bool = True,
):
self.labeler = labeler
self.splitter = splitter
self.pipeline = pipeline
self.feature_reductor = feature_reductor
self.model = model
self.evaluator = evaluator
self.dropna = dropna
self.downprojector = downprojector
self.visualize = visualize
self.verbose = verbose
if isinstance(self.visualize, bool):
if self.visualize:
self.visualize = [
"labeler",
"splitter",
"pipeline",
"feature_reductor",
"model",
"evaluator",
]
else:
self.visualize = []
def _log(self, text) -> None:
"""
        Print the current time and the provided text if verbose is True.
Parameters
----------
text: string
Comment added to printed time.
"""
if self.verbose:
print(datetime.datetime.now().time().strftime("%H:%M:%S.%f")[:-3], text)
def _drop_na(self, X: pd.DataFrame, y: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
"""
        Drop mostly-empty columns, then drop rows with NaN values from the beginning.
Returns
-------
        X, y : tuple (pd.DataFrame, pd.Series)
X as data (with features) and y as labels.
"""
original_shape = X.shape
X.dropna(axis=1, thresh=int(X.shape[0] * 0.9), inplace=True)
cut_number = X.isna().sum().max()
X = X.iloc[cut_number:, :]
if X.isna().sum().sum() > 0:
X = X.dropna(axis=0)
y = y.loc[X.index]
self._log(
f"\tOriginal shape:\t\t{original_shape}; \n\t\tshape after removing NaNs: {X.shape}."
)
return X, y
def run(self, data=None):
"""
Run each module on provided data.
Parameters
----------
data : array-like
Data to evaluate the pipeline on.
Returns
-------
result : dict
Dict of calculated metric values labeled by their names.
"""
if self.labeler is not None:
self._log("Labeling data")
self.labels = self.labeler.transform(data)
if "labeler" in self.visualize:
self.labeler.visualize(labels=self.labels)
if self.splitter is not None:
self._log("Splitting data")
(
self.X_train,
self.X_test,
self.y_train,
self.y_test,
) = self.splitter.transform(X=data, y=self.labels)
if "splitter" in self.visualize:
self.splitter.visualize(X=[self.X_train, self.X_test])
if self.pipeline is not None:
self._log("Fitting pipeline")
self.X_train = self.pipeline.fit_transform(self.X_train, self.y_train)
self._log("Applying pipeline transformations")
self.X_test = self.pipeline.transform(self.X_test)
if self.dropna:
self.X_train, self.y_train = self._drop_na(X=self.X_train, y=self.y_train)
self.X_test, self.y_test = self._drop_na(X=self.X_test, y=self.y_test)
if "pipeline" in self.visualize:
visualize_data(
X=self.X_train,
y=self.y_train,
downprojector=self.downprojector,
title="Visualization of pipeline output",
)
if self.feature_reductor is not None:
self._log("Applying feature reduction")
self.feature_reductor.fit(self.X_train, self.y_train)
self.X_train = self.feature_reductor.transform(self.X_train)
self.X_test = self.feature_reductor.transform(self.X_test)
if "feature_reductor" in self.visualize:
self.feature_reductor.visualize(
X=self.X_train,
y=self.y_train,
downprojector=self.downprojector,
title="Visualization of FeatureReductor output",
)
if self.model is not None:
self._log("Fitting model")
self.model.fit(self.X_train, self.y_train)
if "model" in self.visualize:
self.y_pred = self.model.predict(self.X_train)
if len(self.y_pred.shape) == 1 or self.y_pred.shape[1] == 1:
self.y_pred = | pd.Series(self.y_pred, index=self.X_train.index) | pandas.Series |
import os
import pandas as pd
from glob import glob
def process_stream_sessions(raw_dir='../data/raw/Stream Session*.csv',
save_dir=None):
ss_files = glob(raw_dir)
ds = []
for s_id, file in enumerate(ss_files):
fn = file.split('/')[-1]
d = pd.read_csv(file)
d['filename'] = fn
d['session_id'] = s_id
ds.append(d)
ss = pd.concat(ds)
ss = ss.reset_index(drop=True)
ss['filename'] = ss['filename'].str.replace(' (1)','', regex=False)
ss['end_date'] = ss['filename'].str.split(' ').str[-1].str.replace('.csv','', regex=False)
ss['start_date'] = ss['filename'].str.split(' ').str[-3]
ss['start_date'] = pd.to_datetime(ss['start_date'].str.replace('_','-'))
ss['end_date'] = pd.to_datetime(ss['end_date'].str.replace('_','-'))
ss['start_date'] = pd.to_datetime(ss['start_date'])
ss['end_date'] = pd.to_datetime(ss['end_date'])
num_cols = ['Viewers', 'Live Views', 'New Followers',
'Chatters', 'Chat Messages', 'Ad Breaks', 'Subscriptions',
'Clips Created', 'All Clip Views']
ss[num_cols] = ss[num_cols].fillna(0).astype('int')
ss['overnight'] = ss['start_date'] != ss['end_date']
ss['hour'] = pd.to_datetime(ss['Timestamp']).dt.hour
ss['minute'] = pd.to_datetime(ss['Timestamp']).dt.minute
ss['date'] = ss.apply(lambda row: row['start_date'] if row['hour'] > 12 else row['end_date'], axis=1)
ss['datetime'] = pd.to_datetime(ss['date'].astype('str') + ' ' + ss['Timestamp'])
ss['session'] = ss['filename'].str.strip('.csv')
keep_cols = ['session','datetime', 'Viewers',
'Live Views', 'New Followers',
'Chatters', 'Chat Messages',
'Ad Breaks', 'Subscriptions',
'Clips Created', 'All Clip Views', 'session_id']
    if save_dir is not None:
        # honor the save_dir argument instead of a hard-coded path
        ss[keep_cols].to_csv(os.path.join(save_dir, 'StreamSession.csv'), index=False)
return ss[keep_cols]
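# Usage sketch (argument values are assumptions based on the defaults above):
#     ss = process_stream_sessions(raw_dir='../data/raw/Stream Session*.csv',
#                                  save_dir='../data/processed')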
def process_channel_analytics(raw_dir="../data/raw/Channel Analytics*.csv"):
ca_files = glob(raw_dir)
ds = []
for file in ca_files:
fn = file.split("/")[-1]
d = | pd.read_csv(file) | pandas.read_csv |
import pandas as pd
import os
# This file processes blood transfusion data; variable and column names are given in Turkish.
writer = pd.ExcelWriter('tümü.xlsx', engine='xlsxwriter')
writer2 = pd.ExcelWriter('ozet.xlsx', engine='xlsxwriter')
writer3 = pd.ExcelWriter('hasta başı toplam transfüzyon sayısı.xlsx', engine='xlsxwriter')
file_list = []
for file in os.listdir("veriler"):
if file.endswith(".xls") or file.endswith(".xlsx"):
file_list.append(file)
continue
else:
continue
print(file_list)
# str.strip() removes a *set of characters*, not a suffix, so use splitext to drop the extension
hast_list = [os.path.splitext(s)[0] for s in file_list]
print(hast_list)
pivot = pd.DataFrame(columns=['HGB > 10', "Kanıtsız ES Sayı", "Son 24s Kanıtsız ES", 'Toplam ES Sayısı',
"PLT > 100.000", "Kanıtsız PLT Sayı", "Son 24s Kanıtsız PLT", "Toplam PLT Trans.",
"INR < 1,5", "Kanıtsız TDP Sayı", "Son 24s Kanıtsız TDP", "Top TDP Trans.",
"Toplam End. Dışı", "Toplam Kanıtsız", " Toplam Son 24s Kanıtsız", "Toplam Transfüzyon",
"Toplam Hasta Sayısı"], index=hast_list)
for adi in hast_list:
    print(f'Now processing hospital {adi}...')
kan_ham_tablo = pd.read_excel("veriler/" + adi + ".xlsx", dtype='object')
kan_ham_tablo['Çıkış Tarihi'] = pd.DatetimeIndex(kan_ham_tablo['Çıkış Tarihi'], dayfirst=True)
print(kan_ham_tablo.info())
hasta_sayisi = kan_ham_tablo['Dosya No'].nunique()
toplam_transfuzyon = kan_ham_tablo['Dosya No'].count()
kan_ham_tablo['HGB'] = kan_ham_tablo["Geçmiş"].str.extract(r'(HGB = \d+\.\d+|HGB = \d+)', expand=False)
kan_ham_tablo['HGB Değer'] = kan_ham_tablo["HGB"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['PLT'] = kan_ham_tablo["Geçmiş"].str.extract(r'(PLT = \d+\.\d+|PLT = \d+)', expand=False)
kan_ham_tablo['PLT Değer'] = kan_ham_tablo["PLT"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['aPTT'] = kan_ham_tablo["Geçmiş"].str.extract(r'(aPTT = \d+\.\d+|aPTT = \d+)', expand=False)
kan_ham_tablo['aPTT Değer'] = kan_ham_tablo["aPTT"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['PT'] = kan_ham_tablo["Geçmiş"].str.extract(r'(PT = \d+\.\d+|PT = \d+)', expand=False)
kan_ham_tablo['PT Değer'] = kan_ham_tablo["PT"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['INR'] = kan_ham_tablo["Geçmiş"].str.extract(r'(INR = \d+\.\d+|INR = \d+)', expand=False)
kan_ham_tablo['INR Değer'] = kan_ham_tablo["INR"].str.extract(r'(\d+\.\d+|\d+)', expand=False)
kan_ham_tablo['HGB Değer'] = kan_ham_tablo['HGB Değer'].astype(float)
kan_ham_tablo['PLT Değer'] = kan_ham_tablo['PLT Değer'].astype(float)
kan_ham_tablo['aPTT Değer'] = kan_ham_tablo['aPTT Değer'].astype(float)
kan_ham_tablo['PT Değer'] = kan_ham_tablo['PT Değer'].astype(float)
kan_ham_tablo['INR Değer'] = kan_ham_tablo['INR Değer'].astype(float)
    kayit_icin = kan_ham_tablo.drop(columns=["HGB", "PLT", "aPTT", "PT", "INR"])
kayit_icin.to_excel(writer, sheet_name=adi)
pivot_hasta = pd.pivot_table(kayit_icin, values='Kan Ürünü Cinsi', index='Dosya No', aggfunc='count')
pivot_hasta = pivot_hasta.sort_values(by='Kan Ürünü Cinsi', ascending=False)
pivot_hasta.to_excel(writer3, sheet_name=adi)
kayit_icin['gecmis_tarih'] = kayit_icin["Geçmiş"].str.extract(r'(\d+\.\d+.\d+ \d+:\d+)', expand=False)
kayit_icin['gecmis_tarih'] = pd.DatetimeIndex(kayit_icin['gecmis_tarih'], dayfirst=True)
kayit_icin['tarih_fark'] = kayit_icin['Çıkış Tarihi'] - kayit_icin['gecmis_tarih']
hgb_trans_toplam = kayit_icin[kayit_icin['Kan Ürünü Cinsi'].str.contains('ritrosit')]
hgb_end_disi = hgb_trans_toplam[hgb_trans_toplam['HGB Değer'] > 10]
hgb_end_disi = len(hgb_end_disi)
hgb_no_kanit = hgb_trans_toplam[~hgb_trans_toplam["Geçmiş"].str.contains('HGB', na=False)]
hgb_no_kanit = len(hgb_no_kanit)
hgb_dolu_gecmis = hgb_trans_toplam[hgb_trans_toplam["Geçmiş"].str.contains('HGB', na=False)]
hgb_date_diff = hgb_dolu_gecmis[hgb_dolu_gecmis['tarih_fark'] > pd.Timedelta(days=1)]
print(hgb_date_diff)
hgb_no_kanit_24 = len(hgb_date_diff)
hgb_trans_toplam = len(hgb_trans_toplam)
if hgb_trans_toplam == 0:
hgb_oran = 0
else:
hgb_oran = hgb_end_disi / hgb_trans_toplam
plt_trans_toplam = kayit_icin[kayit_icin['Kan Ürünü Cinsi'].str.contains('rombosit|PLT')]
plt_end_disi = plt_trans_toplam[plt_trans_toplam['PLT Değer'] > 100]
plt_end_disi = len(plt_end_disi)
plt_no_kanit = plt_trans_toplam[~plt_trans_toplam["Geçmiş"].str.contains('PLT', na=False)]
plt_no_kanit = len(plt_no_kanit)
plt_dolu_gecmis = plt_trans_toplam[plt_trans_toplam["Geçmiş"].str.contains('PLT', na=False)]
plt_date_diff = plt_dolu_gecmis[plt_dolu_gecmis['tarih_fark'] > | pd.Timedelta(days=1) | pandas.Timedelta |
import os
import glob
import pandas as pd
import sys
os.chdir(".")
pattern = sys.argv[1]+"*.csv"
all_filenames = [i for i in glob.glob(pattern)]
#combine all files in the list
#combined_csv = pd.concat([pd.read_csv(f, index_col=0) for f in all_filenames ], axis=0, join='outer', ignore_index=False, sort=False)
#export to csv
combined_csv = pd.concat([pd.read_csv(f, index_col=0) for f in all_filenames ], sort=False, axis=1)
df = | pd.DataFrame(combined_csv) | pandas.DataFrame |
""":func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
import struct
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.computation.common import NameResolutionError
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError
def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(gbls=global_dict, lcls=local_dict, level=level,
resolvers=resolvers, target=target)
def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):
"""Make sure that variables in resolvers don't overlap with locals or
globals.
"""
res_locals = list(com.intersection(resolver_keys, local_keys))
if res_locals:
msg = "resolvers and locals overlap on names {0}".format(res_locals)
raise NameResolutionError(msg)
res_globals = list(com.intersection(resolver_keys, global_keys))
if res_globals:
msg = "resolvers and globals overlap on names {0}".format(res_globals)
raise NameResolutionError(msg)
def _replacer(x, pad_size):
"""Replace a number with its padded hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin).replace('0x', '').rjust(pad_size, '0')
def _raw_hex_id(obj, pad_size=2):
"""Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(_replacer(x, pad_size) for x in packed)
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
added by pandas.
Parameters
----------
gbls : dict or None, optional, default None
lcls : dict or Scope or None, optional, default None
level : int, optional, default 1
resolvers : list-like or None, optional, default None
Attributes
----------
globals : dict
locals : dict
level : int
resolvers : tuple
resolver_keys : frozenset
"""
__slots__ = ('globals', 'locals', 'resolvers', '_global_resolvers',
'resolver_keys', '_resolver', 'level', 'ntemps', 'target')
def __init__(self, gbls=None, lcls=None, level=1, resolvers=None,
target=None):
self.level = level
self.resolvers = tuple(resolvers or [])
self.globals = dict()
self.locals = dict()
self.target = target
self.ntemps = 1 # number of temporary variables in this scope
if isinstance(lcls, Scope):
ld, lcls = lcls, dict()
self.locals.update(ld.locals.copy())
self.globals.update(ld.globals.copy())
self.resolvers += ld.resolvers
if ld.target is not None:
self.target = ld.target
self.update(ld.level)
frame = sys._getframe(level)
try:
self.globals.update(gbls or frame.f_globals)
self.locals.update(lcls or frame.f_locals)
finally:
del frame
# add some useful defaults
self.globals['Timestamp'] = pd.lib.Timestamp
self.globals['datetime'] = datetime
# SUCH a hack
self.globals['True'] = True
self.globals['False'] = False
# function defs
self.globals['list'] = list
self.globals['tuple'] = tuple
res_keys = (list(o.keys()) for o in self.resolvers)
self.resolver_keys = frozenset(reduce(operator.add, res_keys, []))
self._global_resolvers = self.resolvers + (self.locals, self.globals)
self._resolver = None
self.resolver_dict = {}
for o in self.resolvers:
self.resolver_dict.update(dict(o))
def __unicode__(self):
return com.pprint_thing(
            'locals: {0}\nglobals: {1}\nresolvers: '
            '{2}\ntarget: {3}'.format(list(self.locals.keys()),
list(self.globals.keys()),
list(self.resolver_keys),
self.target))
def __getitem__(self, key):
return self.resolve(key, globally=False)
def resolve(self, key, globally=False):
resolvers = self.locals, self.globals
if globally:
resolvers = self._global_resolvers
for resolver in resolvers:
try:
return resolver[key]
except KeyError:
pass
def update(self, level=None):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
# we are always 2 levels below the caller
# plus the caller may be below the env level
        # in which case we need additional levels
sl = 2
if level is not None:
sl += level
# add sl frames to the scope starting with the
        # most distant and overwriting with more current
# makes sure that we can capture variable scope
frame = inspect.currentframe()
try:
frames = []
while sl >= 0:
frame = frame.f_back
sl -= 1
if frame is None:
break
frames.append(frame)
for f in frames[::-1]:
self.locals.update(f.f_locals)
self.globals.update(f.f_globals)
finally:
del frame, frames
def add_tmp(self, value, where='locals'):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
where : basestring, optional, default 'locals', {'locals', 'globals'}
What scope to add the value to.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
d = getattr(self, where, None)
if d is None:
raise AttributeError("Cannot add value to non-existent scope "
"{0!r}".format(where))
if not isinstance(d, dict):
raise TypeError("Cannot add value to object of type {0!r}, "
"scope must be a dictionary"
"".format(type(d).__name__))
name = 'tmp_var_{0}_{1}_{2}'.format(type(value).__name__, self.ntemps,
_raw_hex_id(self))
d[name] = value
# only increment if the variable gets put in the scope
self.ntemps += 1
return name
def remove_tmp(self, name, where='locals'):
d = getattr(self, where, None)
if d is None:
raise AttributeError("Cannot remove value from non-existent scope "
"{0!r}".format(where))
if not isinstance(d, dict):
raise TypeError("Cannot remove value from object of type {0!r}, "
"scope must be a dictionary"
"".format(type(d).__name__))
del d[name]
self.ntemps -= 1
def _rewrite_assign(source):
"""Rewrite the assignment operator for PyTables expression that want to use
``=`` as a substitute for ``==``.
"""
res = []
g = tokenize.generate_tokens(StringIO(source).readline)
for toknum, tokval, _, _, _ in g:
res.append((toknum, '==' if tokval == '=' else tokval))
return tokenize.untokenize(res)
def _replace_booleans(source):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
"""
return source.replace('|', ' or ').replace('&', ' and ')
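# Illustrative example (hedged): _replace_booleans("a & b | c") returns "a  and  b  or  c";
# the doubled spaces come from the naive str.replace calls.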
def _replace_locals(source, local_symbol='@'):
"""Replace local variables with a syntacticall valid name."""
return source.replace(local_symbol, _LOCAL_TAG)
def _preparse(source):
"""Compose assignment and boolean replacement."""
return _replace_booleans(_rewrite_assign(source))
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# we're adding a different assignment in some cases to be equality comparison
# and we don't want `stmt` and friends in there, so get only the classes whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
"""Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable
"""
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
"""Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
----------
env : Scope
engine : str
parser : str
preparser : callable
"""
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
rewrite_map = {
ast.Eq: ast.In,
ast.NotEq: ast.NotIn,
ast.In: ast.In,
ast.NotIn: ast.NotIn
}
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
self.parser = parser
self.preparser = preparser
self.assigner = None
def visit(self, node, **kwargs):
if isinstance(node, string_types):
clean = self.preparser(node)
node = ast.fix_missing_locations(ast.parse(clean))
elif not isinstance(node, ast.AST):
raise TypeError("Cannot visit objects of type {0!r}"
"".format(node.__class__.__name__))
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
def visit_Module(self, node, **kwargs):
if len(node.body) != 1:
raise SyntaxError('only a single expression is allowed')
expr = node.body[0]
return self.visit(expr, **kwargs)
def visit_Expr(self, node, **kwargs):
return self.visit(node.value, **kwargs)
def _rewrite_membership_op(self, node, left, right):
# the kind of the operator (is actually an instance)
op_instance = node.op
op_type = type(op_instance)
# must be two terms and the comparison operator must be ==/!=/in/not in
if is_term(left) and is_term(right) and op_type in self.rewrite_map:
left_list, right_list = map(_is_list, (left, right))
left_str, right_str = map(_is_str, (left, right))
# if there are any strings or lists in the expression
if left_list or right_list or left_str or right_str:
op_instance = self.rewrite_map[op_type]()
# pop the string variable out of locals and replace it with a list
# of one string, kind of a hack
if right_str:
self.env.remove_tmp(right.name)
name = self.env.add_tmp([right.value])
right = self.term_type(name, self.env)
if left_str:
self.env.remove_tmp(left.name)
name = self.env.add_tmp([left.value])
left = self.term_type(name, self.env)
op = self.visit(op_instance)
return op, op_instance, left, right
def _possibly_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
right = self.visit(node.right, side='right')
op, op_class, left, right = self._rewrite_membership_op(node, left,
right)
return op, op_class, left, right
def _possibly_eval(self, binop, eval_in_python):
# eval `in` and `not in` (for now) in "partial" python space
# things that can be evaluated in "eval" space will be turned into
# temporary variables. for example,
# [1,2] in a + 2 * b
# in that case a + 2 * b will be evaluated using numexpr, and the "in"
# call will be evaluated using isin (in python space)
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
eval_in_python=('in', 'not in'),
maybe_eval_in_python=('==', '!=', '<', '>',
'<=', '>=')):
res = op(lhs, rhs)
if self.engine != 'pytables':
if (res.op in _cmp_ops_syms
and getattr(lhs, 'is_datetime', False)
or getattr(rhs, 'is_datetime', False)):
# all date ops must be done in python bc numexpr doesn't work
# well with NaT
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object
or getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
op, op_class, left, right = self._possibly_transform_eq_ne(node)
return self._possibly_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
return lambda lhs, rhs: Div(lhs, rhs,
truediv=self.env.locals['truediv'])
def visit_UnaryOp(self, node, **kwargs):
op = self.visit(node.op)
operand = self.visit(node.operand)
return op(operand)
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
def visit_NameConstant(self, node, **kwargs):
return self.const_type(node.value, self.env)
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
name = self.env.add_tmp([self.visit(e).value for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
def visit_Index(self, node, **kwargs):
""" df.index[4] """
return self.visit(node.value)
def visit_Subscript(self, node, **kwargs):
value = self.visit(node.value)
slobj = self.visit(node.slice)
result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
parser=self.parser)
try:
# a Term instance
v = value.value[result]
except AttributeError:
# an Op instance
lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
parser=self.parser)
v = lhs[result]
name = self.env.add_tmp(v)
return self.term_type(name, env=self.env)
def visit_Slice(self, node, **kwargs):
""" df.index[slice(4,6)] """
lower = node.lower
if lower is not None:
lower = self.visit(lower).value
upper = node.upper
if upper is not None:
upper = self.visit(upper).value
step = node.step
if step is not None:
step = self.visit(step).value
return slice(lower, upper, step)
def visit_Assign(self, node, **kwargs):
"""
support a single assignment node, like
c = a + b
set the assigner at the top level, must be a Name node which
might or might not exist in the resolvers
"""
if len(node.targets) != 1:
raise SyntaxError('can only assign a single expression')
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('left hand side of an assignment must be a '
'single name')
if self.env.target is None:
raise ValueError('cannot assign without a target object')
try:
assigner = self.visit(node.targets[0], **kwargs)
except UndefinedVariableError:
assigner = node.targets[0].id
self.assigner = getattr(assigner, 'name', assigner)
if self.assigner is None:
raise SyntaxError('left hand side of an assignment must be a '
'single resolvable name')
return self.visit(node.value, **kwargs)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx
if isinstance(ctx, ast.Load):
# resolve the value
resolved = self.visit(value).value
try:
v = getattr(resolved, attr)
name = self.env.add_tmp(v)
return self.term_type(name, self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def visit_Call(self, node, side=None, **kwargs):
# this can happen with: datetime.datetime
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
res = self.visit(node.func)
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
args = [self.visit(targ).value for targ in node.args]
if node.starargs is not None:
args = args + self.visit(node.starargs).value
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
keywords[key.arg] = self.visit(key.value).value
if node.kwargs is not None:
keywords.update(self.visit(node.kwargs).value)
return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
def visit_Compare(self, node, **kwargs):
ops = node.ops
comps = node.comparators
# base case: we have something like a CMP b
if len(comps) == 1:
op = self.translate_In(ops[0])
binop = ast.BinOp(op=op, left=node.left, right=comps[0])
return self.visit(binop)
# recursive case: we have a chained comparison, a CMP b CMP c, etc.
left = node.left
values = []
for op, comp in zip(ops, comps):
new_node = self.visit(ast.Compare(comparators=[comp], left=left,
ops=[self.translate_In(op)]))
left = comp
values.append(new_node)
return self.visit(ast.BoolOp(op=ast.And(), values=values))
def _try_visit_binop(self, bop):
if isinstance(bop, (Op, Term)):
return bop
return self.visit(bop)
def visit_BoolOp(self, node, **kwargs):
def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
rhs)
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
(_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser,
preparser=lambda x: _replace_locals(_replace_booleans(x))):
super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser, preparser=lambda x: x):
super(PythonExprVisitor, self).__init__(env, engine, parser,
preparser=preparser)
class Expr(StringMixin):
"""Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
truediv : bool, optional, default True
level : int, optional, default 2
"""
def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
truediv=True, level=2):
self.expr = expr
self.env = _ensure_scope(level=level, local_dict=env)
self.engine = engine
self.parser = parser
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
self.truediv = truediv
@property
def assigner(self):
return getattr(self._visitor, 'assigner', None)
def __call__(self):
self.env.locals['truediv'] = self.truediv
return self.terms(self.env)
def __unicode__(self):
return com.pprint_thing(self.terms)
def __len__(self):
return len(self.expr)
def parse(self):
"""Parse an expression"""
return self._visitor.visit(self.expr)
def align(self):
"""align a set of Terms"""
return self.terms.align(self.env)
@property
def names(self):
"""Get the names in an expression"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in | com.flatten(self.terms) | pandas.core.common.flatten |
# 1584927559
import task_submit
# import task_submit_optimus
import task_submit_raw
from task_submit_raw import VGGTask,RESTask,RETask,DENTask,XCETask
import random
import kubernetes
import influxdb
import kubernetes
import signal
from TimeoutException import TimeoutError,Myhandler
import yaml
import requests
from multiprocessing import Process
import multiprocessing
import urllib
import urllib3
import time
import operator
import numpy as np
# from utils import Timer
from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
import time
'''
Changes:
1. No scheduling strategy needs to be provided.
2. The initial resource allocation is inaccurate.
3. The initial numbers of PS and Worker are set randomly.
4. Resources are no longer adjusted, and nodes are not adjusted either.
5. The buffer strategy is modified.
'''
# from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)  # configure numpy print options (suppress scientific notation)
import os
import json
import math
import pandas as pd
import argparse
import random
import multiprocessing
import time
from pytz import UTC
from dateutil import parser
from datetime import datetime
import psutil
import socket
from max_heap import MaxHeap
import worker_queue
# from worker_queue import value_free_load,value_weight_load
from Global_client import Global_Influx
aToken = '<KEY>'
aTokenw = '<KEY>'
LOSSHOST = '192.168.128.5'
LOSSPORT = 12527
DNA_SIZE = 4
XBOUND = [0.8,2]
XBOUND2 = [0.5,0.95]
YBOUND2 = [0.65,0.95]
YBOUND = [1,3]
CROSSOVER_RATE = 0.8
POP_SIZE = 16
N_GENERATIONS = 8
def load_config(config_file):
    # # A JSON string is just a string
# f = open('product.json', encoding='utf-8')
# res = f.read()
    # product_dic = json.loads(res)  # turn the JSON string into Python data types; only converts JSON string content
# print(product_dic)
# print(product_dic['iphone'])
# # t = json.load(f)
    # # print(t)  # pass a file object: it reads the JSON file directly and converts it into Python data
# # print(t['iphone'])
# f.close()
f = open(config_file,encoding='utf-8')
res = f.read()
config_content = json.loads(res)
f.close()
return config_content
def save_config(config,filename):
config_content = {}
for key,value in config.items():
# if key != 'job' and key != 'ns':
config_content[key] = value
# task_content['task_id'] = tasks['task_id']
fw = open(filename, 'w', encoding='utf-8')
    # ensure_ascii: defaults to True; non-ASCII characters in the dict would be shown as \uXXXX escapes, so set it to False to display them normally
    dic_json = json.dumps(config_content, ensure_ascii=False, indent=4)  # convert the dict to a JSON string
fw.write(dic_json)
fw.close()
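# The two helpers below work around Kubernetes namespaces that get stuck in the 'Terminating' state:
# they dump the namespace object to JSON, clear spec.finalizers, and PUT the object back through the
# /finalize endpoint (via a local `kubectl proxy` on port 8081) so the namespace can actually be removed.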
def deletehelp(delete_job_name,v1):
try:
v1.delete_namespace(delete_job_name)
except Exception as eeeeee:
print(eeeeee)
command0 = "kubectl get namespace " + delete_job_name + " -o json > /tfdata/tfcnn/deletebuf/" + delete_job_name + ".json"
os.system(command0)
tmp = load_config("/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
tmp["spec"]["finalizers"] = []
save_config(tmp, "/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
try:
command1 = 'curl -k -H "Content-Type: application/json" -X PUT --data-binary @/tfdata/tfcnn/deletebuf/' + delete_job_name + '.json http://127.0.0.1:8081/api/v1/namespaces/'+delete_job_name+'/finalize'
os.system(command1)
except Exception as helpe:
print(helpe)
commandopen = 'kubectl proxy --port=8081'
os.system(commandopen)
os.system(command1)
def deletehelp2(delete_job_name,v1):
v1.delete_namespace(delete_job_name)
command0 = "kubectl get namespace " + delete_job_name + " -o json > /tfdata/tfcnn/deletebuf/" + delete_job_name + ".json"
os.system(command0)
tmp = load_config("/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
tmp["spec"]["finalizers"] = []
save_config(tmp, "/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
try:
command1 = 'curl -k -H "Content-Type: application/json" -X PUT --data-binary @/tfdata/tfcnn/deletebuf/' + delete_job_name + '.json http://127.0.0.1:8081/api/v1/namespaces/' + delete_job_name + '/finalize'
os.system(command1)
except Exception as helpe:
print(helpe)
commandopen = 'kubectl proxy --port=8081'
os.system(commandopen)
os.system(command1)
def parse():
parser = argparse.ArgumentParser(description="Node Monitor")
parser.add_argument('--save_path', default='/tfdata/nodedata', help='save path')
parser.add_argument('--database',default="NODEMESSAGE",help="save database")
parser.add_argument('--derivation',default=10,help='sampling rate')
parser.add_argument('--measurement',default="NODEMESSAGE",help="save measurement")
# parser.add_argument('--train_pg', action='store_true', help='whether train policy gradient')
# parser.add_argument('--train_dqn', action='store_true', help='whether train DQN')
# parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')
# parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')
args = parser.parse_args()
return args
def update_token():
cacheData = os.popen(
"echo $(kubectl describe secret $(kubectl get secret -n kube-system | grep ^admin-user | awk '{print $1}') -n kube-system | grep -E '^token'| awk '{print $2}')").read()
cacheToken = cacheData[:-1]
newToken = str(cacheToken)
return newToken
def make_headers(Token):
text = 'Bearer ' + Token
headers = {'Authorization': text}
return headers
def catch_message(url):
global aToken
aToken = update_token()
headers = make_headers(aToken)
response = requests.get(url,headers=headers,verify=False)
res_json = response.json()
return res_json
def database_create(databasename):
database_list = Global_Influx.Client_all.get_list_database()
creating = True
for db in database_list:
dbl = list(db.values())
if databasename in dbl:
creating = False
break
if creating:
Global_Influx.Client_all.create_database(databasename)
# Global_Influx.Client_all.create_database(databasename)
def tongji_adjust_number(aim_list):
tongji_wenjian = load_config('modnum.json')
aim_key_lists = list(tongji_wenjian.keys())
for i in aim_list:
if i in aim_key_lists:
tongji_wenjian[i]+=1
else:
tongji_wenjian[i]=1
save_config(tongji_wenjian,'modnum.json')
def tongji_waiting_queue(submit_job_name,time_submit_now):
waiting_time = load_config('waiting_time.json')
waited = list(waiting_time.keys())
if submit_job_name not in waiting_time:
waiting_time[submit_job_name] = time_submit_now
save_config(waiting_time,'waiting_time.json')
def match_cpu(raw_data):
cache = raw_data[:-1]
matched_data = math.ceil(int(cache)/1e6)
return matched_data
def match_memory(raw_data):
cache = raw_data[:-2]
matched_data = math.ceil(int(cache)/1024)
return matched_data
def match_timestamp(raw_data):
EPOCH = UTC.localize(datetime.utcfromtimestamp(0))
timestamp = parser.parse(raw_data)
if not timestamp.tzinfo:
print("XXX")
timestamp = UTC.localize(timestamp)
s = (timestamp - EPOCH).total_seconds()
return int(s)
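# Quick illustration of the three converters above (not part of the original pipeline; the sample
# values are made up): the metrics API reports CPU in nanocores and memory in KiB, e.g.
#   match_cpu('123456789n')                  -> math.ceil(123456789 / 1e6) == 124  (millicores)
#   match_memory('2048Ki')                   -> math.ceil(2048 / 1024)     == 2    (MiB)
#   match_timestamp('2020-03-23T02:19:19Z')  -> seconds since the Unix epoch (UTC-localized)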
def generate_item(response,measurement):
node_cpu = {}
node_cpu['k8s-master'] = 64000 - 8000
node_cpu['k8s-worker0'] = 24000 - 400
node_cpu['k8s-worker2'] = 24000 - 400
node_cpu['k8sworker1'] = 16000 - 520
node_cpu['k8s-worker3'] = 24000 - 150
node_cpu['k8s-worker4'] = 24000 - 150
node_cpu['k8s-worker5'] = 24000 - 150
node_cpu['k8s-worker6'] = 16000 - 150
node_cpu['k8s-worker7'] = 16000 - 150
node_cpu['k8s-worker8'] = 16000 - 150
node_cpu['k8s-worker9'] = 16000 - 150
node_cpu['k8s-worker10'] = 16000 - 150
node_cpu['k8s-worker11'] = 24000 - 300
node_cpu['k8s-worker12'] = 16000 - 150
node_cpu['k8s-worker13'] = 16000 - 150
node_cpu['k8s-worker14'] = 16000 - 150
node_cpu['k8s-worker15'] = 16000 - 150
node_cpu['k8s-worker16'] = 16000 - 150
node_cpu['k8s-worker17'] = 24000 - 150
node_cpu['k8s-worker18'] = 16000 - 150
node_cpu['k8s-worker19'] = 32000 - 150
node_cpu['k8s-worker20'] = 24000 - 150
node_memory = {}
node_memory['k8s-master'] = float(251 * 1024 - 32000)
node_memory['k8s-worker0'] = float(94 * 1024 - 4000)
node_memory['k8s-worker2'] = float(94 * 1024 - 3000)
node_memory['k8sworker1'] = float(125 * 1024 - 4500)
node_memory['k8s-worker3'] = float(94 * 1024 - 2200)
node_memory['k8s-worker4'] = float(188 * 1024 - 2200)
node_memory['k8s-worker5'] = float(94 * 1024 - 2200)
node_memory['k8s-worker6'] = float(62 * 1024 - 2000)
node_memory['k8s-worker7'] = float(62 * 1024 - 2000)
node_memory['k8s-worker8'] = float(62 * 1024 - 2000)
node_memory['k8s-worker9'] = float(62 * 1024 - 2000)
node_memory['k8s-worker10'] = float(62 * 1024 - 2000)
node_memory['k8s-worker11'] = float(94 * 1024 - 2200)
node_memory['k8s-worker12'] = float(62 * 1024 - 2000)
node_memory['k8s-worker13'] = float(62 * 1024 - 2000)
node_memory['k8s-worker14'] = float(62 * 1024 - 2000)
node_memory['k8s-worker15'] = float(62 * 1024 - 2000)
node_memory['k8s-worker16'] = float(62 * 1024 - 2000)
node_memory['k8s-worker17'] = float(94 * 1024 - 2000)
node_memory['k8s-worker18'] = float(62 * 1024 - 2000)
node_memory['k8s-worker19'] = float(125 * 1024 - 2000)
node_memory['k8s-worker20'] = float(94 * 1024 - 2000)
points = []
# content = {}
timestamp = response['items'][0]['metadata']['creationTimestamp']
for item in response['items']:
content = {
'measurement': measurement,
'tags':{
"nodes": item['metadata']['name']
},
'fields': {
'cpu': match_cpu(item['usage']['cpu']),
'memory': match_memory(item['usage']['memory']),
'cpu_percent': float(match_cpu(item['usage']['cpu'])/node_cpu[item['metadata']['name']]),
'memory_percent': float(match_memory(item['usage']['memory']) / node_memory[item['metadata']['name']])
},
'time': match_timestamp(timestamp)
}
points.append(content)
return points
def DeletefromDB(Client,DatabaseName):
databases = Client.get_list_database()
for Cn in databases:
if DatabaseName in Cn.values():
Client.drop_database(DatabaseName)
break
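# Node_mess repeatedly samples the metrics API (the `derivation` argument sets the pause between
# samples), accumulates per-node CPU/memory series (absolute values and percentages of each node's
# usable capacity) in memory, writes every sample to InfluxDB, and appends the accumulated series
# to CSV files every 30 samples.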
class Node_mess(multiprocessing.Process):
def __init__(self,url,args,tasks,v1):
multiprocessing.Process.__init__(self)
self.url = url
self.args = args
self.derivation = args.derivation
self.time_mess = {}
self.cpu_mess = {}
self.memory_mess = {}
self.cpu_per = {}
self.memory_per = {}
self.node_cpu = {}
self.node_cpu['k8s-master'] = 64000 - 8000
self.node_cpu['k8s-worker0'] = 24000 - 400
self.node_cpu['k8s-worker2'] = 24000 - 400
self.node_cpu['k8sworker1'] = 16000 - 520
self.node_cpu['k8s-worker3'] = 24000 - 150
self.node_cpu['k8s-worker4'] = 24000 - 150
self.node_cpu['k8s-worker5'] = 24000 - 150
self.node_cpu['k8s-worker6'] = 16000 - 150
self.node_cpu['k8s-worker7'] = 16000 - 150
self.node_cpu['k8s-worker8'] = 16000 - 150
self.node_cpu['k8s-worker9'] = 16000 - 150
self.node_cpu['k8s-worker10'] = 16000 - 150
self.node_cpu['k8s-worker11'] = 24000 - 300
self.node_cpu['k8s-worker12'] = 16000 - 150
self.node_cpu['k8s-worker13'] = 16000 - 150
self.node_cpu['k8s-worker14'] = 16000 - 150
self.node_cpu['k8s-worker15'] = 16000 - 150
self.node_cpu['k8s-worker16'] = 16000 - 150
self.node_cpu['k8s-worker17'] = 24000 - 150
self.node_cpu['k8s-worker18'] = 16000 - 150
self.node_cpu['k8s-worker19'] = 32000 - 150
self.node_cpu['k8s-worker20'] = 24000 - 150
self.node_memory = {}
self.node_memory['k8s-master'] = float(251 * 1024 - 32000)
self.node_memory['k8s-worker0'] = float(94 * 1024 - 4000)
self.node_memory['k8s-worker2'] = float(94 * 1024 - 3000)
self.node_memory['k8sworker1'] = float(125 * 1024 - 4500)
self.node_memory['k8s-worker3'] = float(94 * 1024 - 2200)
self.node_memory['k8s-worker4'] = float(188 * 1024 - 2200)
self.node_memory['k8s-worker5'] = float(94 * 1024 - 2200)
self.node_memory['k8s-worker6'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker7'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker8'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker9'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker10'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker11'] = float(94 * 1024 - 2200)
self.node_memory['k8s-worker12'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker13'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker14'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker15'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker16'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker17'] = float(94 * 1024 - 2000)
self.node_memory['k8s-worker18'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker19'] = float(125 * 1024 - 2000)
self.node_memory['k8s-worker20'] = float(94 * 1024 - 2000)
# self.derivation = derivation
self.arg = args
self.tasks = tasks
self.v1 = v1
self.database = args.database
self.measurement = args.measurement
self.save_path = args.save_path
if not os.path.exists(self.arg.save_path):
os.makedirs(self.arg.save_path)
database_create(self.database)
self.client = influxdb.InfluxDBClient('192.168.128.10',port=8086,username='admin',password='<PASSWORD>',database=self.database)
#derivation
# def node_measurement(self,node_list):
# # Global_Influx.Client_all.get_list_measurements()
def run(self):
print(multiprocessing.current_process().pid)
print(os.getpid())
response = catch_message(self.url)
self.time_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
for item in response['items']:
self.time_mess[item['metadata']['name']] = [item['timestamp']]
self.cpu_mess[item['metadata']['name']] = [match_cpu(item['usage']['cpu'])]
self.memory_mess[item['metadata']['name']] = [match_memory(item['usage']['memory'])]
self.cpu_per[item['metadata']['name']] = [float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']])]
self.memory_per[item['metadata']['name']] = [float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']])]
self.client.write_points(generate_item(response,self.measurement),'s',database=self.database)
time.sleep(self.derivation)
while True:
response = catch_message(self.url)
self.time_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
for item in response['items']:
self.time_mess[item['metadata']['name']].append(item['timestamp'])
self.cpu_mess[item['metadata']['name']].append(match_cpu(item['usage']['cpu']))
self.memory_mess[item['metadata']['name']].append(match_memory(item['usage']['memory']))
self.cpu_per[item['metadata']['name']].append(float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']]))
self.memory_per[item['metadata']['name']].append(float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']]))
self.client.write_points(generate_item(response, self.measurement), 's', database=self.database)
if len(self.time_mess['creation'])%30==0 and len(self.time_mess['creation']) > 0:
data_frame = pd.DataFrame(self.time_mess)
data_frame.to_csv(self.save_path + '/' + 'struct.csv', mode='a+', index=False, sep=',')
print(self.cpu_mess)
print(len(self.cpu_mess))
for keyss in self.cpu_mess:
print(keyss+": "+str(len(self.cpu_mess[keyss])))
data_frame2 = pd.DataFrame(self.cpu_mess)
data_frame2.to_csv(self.save_path + '/' + 'node_cpu.csv', mode='a+', index=False, sep=',')
data_frame3 = pd.DataFrame(self.memory_mess)
data_frame3.to_csv(self.save_path + '/' + 'node_memory.csv', mode='a+', index=False, sep=',')
data_frame4 = pd.DataFrame(self.cpu_per)
data_frame4.to_csv(self.save_path + '/' + 'node_cpu_per.csv', mode='a+', index=False, sep=',')
data_frame5 = | pd.DataFrame(self.memory_per) | pandas.DataFrame |
"""
Module report
================
A module with helper functions for computing pre-defined plots for the analysis
of fragment combinations.
"""
import warnings
import logging
import argparse
import sys
from shutil import rmtree
from datetime import datetime
import re
from pathlib import Path
from collections import OrderedDict
# data handling
import numpy as np
import json
import pandas as pd
from pandas import DataFrame
import networkx as nx
# data visualization
from matplotlib import pyplot as plt
import seaborn as sns
# from pylab import savefig
from adjustText import adjust_text
# chemoinformatics
import rdkit
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Mol
from rdkit.Chem import rdChemReactions
# docs
from typing import List
from typing import Tuple
from rdkit import RDLogger
# custom libraries
import npfc
from npfc import utils
from npfc import load
from npfc import save
from npfc import fragment_combination
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLOBALS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# test
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CLASSES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class ReporterProcess:
def __init__(self,
chunk_load: str,
chunk_std_passed: str,
chunk_std_filtered: str,
chunk_std_error: str,
chunk_dedupl: str,
WD_out: str,
max_examples: int = 1,
):
pass
class ReporterFragmentSearch:
def __init__(self):
pass
class ReporterFragmentCombination:
def __init__(self):
pass
class ReporterFragmentCombinationGraph:
def __init__(self):
pass
class ReporterPNP:
def __init__(self):
pass
def _parse_std_chunks(chunks: List[str]) -> DataFrame:
"""Parse all output files of a category (passed, filtered or error) for the std step and return a corresponding a results summary.
:param chunks: output files for a category of std results
:return: summary DF with counts
"""
# parse all files
dfs = []
for c in chunks:
df = pd.read_csv(c, sep="|", compression="gzip").groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
if len(df.index) > 0:
dfs.append(df)
# if no case was found, return an empty dataframe
if len(dfs) == 0:
df = pd.DataFrame([], columns=["Count", "Category"])
return df
# concatenate all dataframes and compute the sum of all counts
df = pd.concat(dfs)
df["Category"] = df.index # I don't know how to group by index!
df = df.groupby("Category").sum()
df["Category"] = df.index.map(lambda x: x.replace('filter_', ''))
# df['Perc_status'] = df['Count'].map(lambda x: f"{x/tot_mols:.2%}")
return df
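# Illustrative usage sketch (not part of the original module; the paths below are hypothetical).
# _parse_std_chunks only needs '|'-separated, gzip-compressed chunk files with 'task' and 'status'
# columns; it returns one row per filter category with a summed 'Count' column:
#
#   chunks = ['std/chunk_001_passed.csv.gz', 'std/chunk_002_passed.csv.gz']
#   df_summary = _parse_std_chunks(chunks)
#   # df_summary.columns -> ['Count', 'Category'], indexed by Category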
def preprocess(input_load: str,
output_file: str,
input_std_passed: str = None,
input_std_filtered: str = None,
input_std_error: str = None,
input_dedupl: str = None,
input_depict: str = None,
num_examples: int = 0,
):
"""The information is not looked everywhere using the same logic:
- input_load: the log file from the chunk being loaded
"""
# load
df = pd.read_csv(input_load, sep="@", header=None) # char not found in the log file
records = df[df[0].str.contains("FAILURE")].iloc[0][0].split()
num_total = int(records[6])
num_errors = int(records[9])
num_passed = int(df[df[0].str.contains("SAVED")].iloc[0][0].split()[6])
if num_total != num_errors + num_passed:
raise ValueError(f"Error during parsing of log file: '{input_load}': {num_passed} + {num_errors} != {num_total}")
df_load = DataFrame({'Category': ['loaded', 'cannot_load'], 'Count': [num_passed, num_errors]})
# standardize
df_std_passed = load.file(input_std_passed, decode=False)[['task', 'status']].groupby("task").count()[['status']].reset_index().rename({'task': 'Category', 'status': 'Count'}, axis=1)
df_std_filtered = load.file(input_std_filtered, decode=False)[['task', 'status']].groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
df_std_error = load.file(input_std_error, decode=False)[['task', 'status']].groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
# dedupl
df = pd.read_csv(input_dedupl, sep="@", header=None) # char not found in the log file so we can extract all lines as one column
num_passed, num_total = [int(x) for x in df[df[0].str.contains("REMAINING MOLECULES")].iloc[0][0].split("MOLECULES:")[1].split("/")]
num_filtered = num_total - num_passed
df_dedupl = pd.DataFrame({'Category': ['unique', 'duplicate'], 'Count': [num_passed, num_filtered]})
# depict
pass # nothing to do here at the moment since I never saw any problem at this step
# merge data
def get_df_dedupl(WD: str) -> DataFrame:
"""Get a DF summarizing the results of the deduplication step.
:param WD: the directory of the std step
:return: a DF summarizing results of the deduplication step
"""
logger.info("PREP -- COMPUTING DEDUPL RESULTS")
# iterate over the log files to count status
pattern = ".*([0-9]{3})?_dedupl.log"
chunks = _get_chunks(f"{WD}/log", pattern)
chunks = [c for c in chunks if c.split('.')[-1] == 'log'] # ###### quick and dirty
# print(f"{chunks=}")
logger.info(f"PREP -- FOUND {len(chunks):,d} CHUNKS")
# initiate counts
num_tot = 0
num_passed = 0
num_filtered = 0
for c in chunks:
df = | pd.read_csv(c, sep="@", header=None) | pandas.read_csv |
from cplvm import CPLVM
from cplvm import CPLVMLogNormalApprox
from pcpca import CPCA, PCPCA
import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
import matplotlib
import time
import subprocess
import os
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
tf.enable_v2_behavior()
warnings.filterwarnings("ignore")
if __name__ == "__main__":
p_list = [10, 100, 1000]
num_datapoints_x, num_datapoints_y = 100, 100
NUM_REPEATS = 30
latent_dim_shared, latent_dim_foreground = 3, 3
decisions_experiment = []
decisions_shuffled = []
test_stats_experiment = []
test_stats_shuffled = []
alpha = 0.05
for ii, n_genes in enumerate(p_list):
for jj in range(NUM_REPEATS):
#################################
######### H1 is true ############
#################################
# ------- generate data ---------
cplvm_for_data = CPLVM(
k_shared=latent_dim_shared, k_foreground=latent_dim_foreground
)
concrete_cplvm_model = functools.partial(
cplvm_for_data.model,
data_dim=n_genes,
num_datapoints_x=num_datapoints_x,
num_datapoints_y=num_datapoints_y,
counts_per_cell_X=1,
counts_per_cell_Y=1,
is_H0=False,
)
model = tfd.JointDistributionCoroutineAutoBatched(concrete_cplvm_model)
deltax, sf_x, sf_y, s, zx, zy, w, ty, X_sampled, Y_sampled = model.sample()
X, Y = X_sampled.numpy(), Y_sampled.numpy()
X = np.log(X + 1)
Y = np.log(Y + 1)
X = (X - X.mean(0)) / (X.std(0) + 1e-6)
Y = (Y - Y.mean(0)) / (Y.std(0) + 1e-6)
pd.DataFrame(X.T).to_csv("./tmp/X.csv")
pd.DataFrame(Y.T).to_csv("./tmp/Y.csv")
##### Run test procedure #####
os.system("Rscript johnston2008_test.R")
curr_output = pd.read_csv("./tmp/curr_johnston_output.csv", index_col=0)
test_stats_experiment.append(curr_output.values[0, 0])
decisions_experiment.append(curr_output.values[0, 1] < alpha)
#################################
######### H0 is true ############
#################################
# ------- generate data ---------
cplvm_for_data = CPLVM(
k_shared=latent_dim_shared, k_foreground=latent_dim_foreground
)
concrete_cplvm_model = functools.partial(
cplvm_for_data.model,
data_dim=n_genes,
num_datapoints_x=num_datapoints_x,
num_datapoints_y=num_datapoints_y,
counts_per_cell_X=1,
counts_per_cell_Y=1,
is_H0=True,
)
model = tfd.JointDistributionCoroutineAutoBatched(concrete_cplvm_model)
deltax, sf_x, sf_y, s, zx, zy, X_sampled, Y_sampled = model.sample()
X, Y = X_sampled.numpy(), Y_sampled.numpy()
X = np.log(X + 1)
Y = np.log(Y + 1)
X = (X - X.mean(0)) / (X.std(0) + 1e-6)
Y = (Y - Y.mean(0)) / (Y.std(0) + 1e-6)
pd.DataFrame(X.T).to_csv("./tmp/X.csv")
| pd.DataFrame(Y.T) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
def test_constructor_cast(self):
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
msg = "The number of dimensions required is 3"
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(10, 2))
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
assert list(p.items) == keys
p = Panel.from_dict(d)
assert list(p.items) == keys
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
assert panel['foo'].values.dtype == np.object_
assert panel['A'].values.dtype == np.float64
def test_constructor_error_msgs(self):
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(4, 5, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(4), lrange(5), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 4, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(4), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 5, 4\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(5), lrange(4))
def test_apply_slabs(self):
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(
lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = | Series([0.0] * 5) | pandas.Series |
import math
import matplotlib.pyplot as plt
import seaborn as sns
from numpy import ndarray
from pandas import DataFrame, np, Series
from Common.Comparators.Portfolio.AbstractPortfolioComparator import AbstractPortfolioComparator
from Common.Measures.Portfolio.PortfolioBasics import PortfolioBasics
from Common.Measures.Portfolio.PortfolioFinal import PortfolioFinal
from Common.Measures.Portfolio.PortfolioLinearReg import PortfolioLinearReg
from Common.Measures.Portfolio.PortfolioOptimizer import PortfolioOptimizer
from Common.Measures.Portfolio.PortfolioStats import PortfolioStats
from Common.Measures.Time.TimeSpan import TimeSpan
from Common.StockMarketIndex.AbstractStockMarketIndex import AbstractStockMarketIndex
from Common.StockMarketIndex.Yahoo.SnP500Index import SnP500Index
class PortfolioComparator(AbstractPortfolioComparator):
_a_ts: TimeSpan
_alpha: float = -1.1
_beta: float = -1.1
_a_float: float = -1.1
_a_suffix: str = ''
_a_length: int = -1
_stocks: list
_weights: ndarray
_legend_place: str = 'upper left'
_dataWeightedReturns: DataFrame = DataFrame()
_dataSimpleSummary: DataFrame = DataFrame()
_dataSimpleCorrelation: DataFrame = DataFrame()
_dataSimpleCovariance: DataFrame = DataFrame()
_dataSimpleCovarianceAnnual: DataFrame = DataFrame()
_data_returns_avg: Series = Series()
#_portfolio_weighted_returns: Series = Series()
_portfolio_weighted_returns_cum: Series = Series()
_portfolio_weighted_returns_geom: float = -1.1
_portfolio_weighted_annual_std: float = -1.1
_portfolio_weighted_sharpe_ratio: float = -1.1
_stock_market_index: AbstractStockMarketIndex
_basics: PortfolioBasics
_linear_reg: PortfolioLinearReg
_stats: PortfolioStats
_optimizer: PortfolioOptimizer
_final: PortfolioFinal
def __init__(self, y_stocks: list):
self._a_float = 3 * math.log(y_stocks[0].TimeSpan.MonthCount)
self._a_suffix = y_stocks[0].Column
self._a_ts = y_stocks[0].TimeSpan
self._a_length = len(y_stocks)
iso_weight: float = round(1.0 / len(y_stocks), 3)
self._stocks = y_stocks
self._weights = np.array(len(y_stocks) * [iso_weight], dtype=float)
self._basics = PortfolioBasics(y_stocks, self._a_float, self._legend_place)
self._stats = PortfolioStats(self._weights, self._basics)
self._final = PortfolioFinal(y_stocks, self._a_float, self._legend_place)
print('Volatility\t\t\t\t\t', self._final.Volatility)
print('Annual Expected Return\t\t', self._final.AnnualExpectedReturn)
print('Risk Free Rate\t\t\t\t', self._final.RiskFreeRate)
print('Free 0.005 Sharpe Ratio\t\t', self._final.Free005SharpeRatio)
print('Kurtosis\n', self._final.KurtosisSeries)
print('Skewness\n', self._final.SkewnessSeries)
print('Frequency\n', self._final.Frequency)
self._final.Plot().show()
exit(1234)
self._dataSimpleCorrelation = self._stats.SimpleReturnsNan.corr()
self._dataSimpleCovariance = self._stats.SimpleReturnsNan.cov()
self._dataSimpleCovarianceAnnual = self._dataSimpleCovariance * 252
self._dataSimpleSummary = self._stats.SimpleReturnsNanSummary
self._dataWeightedReturns = self._stats.SimpleWeightedReturns
# axis =1 tells pandas we want to add the rows
self._portfolio_weighted_returns = round(self._dataWeightedReturns.sum(axis=1), 5)
print('7', self._portfolio_weighted_returns.head())
print('7', self._stats.SimpleWeightedReturnsSum.head())
#self._dataWeightedReturns['PORTFOLIOWeighted'] = portfolio_weighted_returns
portfolio_weighted_returns_mean = round(self._portfolio_weighted_returns.mean(), 5)
print('port_ret mean', portfolio_weighted_returns_mean)
print(round(self._stats.SimpleWeightedReturnsSum.mean(), 5))
portfolio_weighted_returns_std = round(self._portfolio_weighted_returns.std(), 5)
print('port_ret std', portfolio_weighted_returns_std)
self._portfolio_weighted_returns_cum: Series = round((self._portfolio_weighted_returns + 1).cumprod(), 5)
#self._dataWeightedReturns['PORTFOLIOCumulative'] = self._portfolio_weighted_returns_cum
print('$', self._dataWeightedReturns.head())
self._portfolio_weighted_returns_geom = round(np.prod(self._portfolio_weighted_returns + 1) ** (252 / self._portfolio_weighted_returns.shape[0]) - 1, 5)
print('geometric_port_return', self._portfolio_weighted_returns_geom)
self._portfolio_weighted_annual_std = round(np.std(self._portfolio_weighted_returns) * np.sqrt(252), 5)
print('port_ret annual', self._portfolio_weighted_annual_std)
self._portfolio_weighted_sharpe_ratio = round(self._portfolio_weighted_returns_geom / self._portfolio_weighted_annual_std, 5)
print('port_sharpe_ratio', self._portfolio_weighted_sharpe_ratio)
print('%', self._stats.Returns.head())
self._data_returns_avg = self._getDataReturnsAverage(self._stats.Returns)
print('^', self._data_returns_avg.head())
        daily_log_pct_changes: DataFrame = np.log(self._stats.Returns.pct_change() + 1)  # before portfolio
daily_log_pct_changes.columns = daily_log_pct_changes.columns + 'LogReturn'
print('&', daily_log_pct_changes.head())
daily_log_volatilities: DataFrame = (daily_log_pct_changes.std() * np.sqrt(252)).to_frame()
daily_log_volatilities.columns = ['Volatility']
print('*', daily_log_volatilities)
port_daily_simple_ret: float = round(np.sum(self._stats.SimpleReturnsNan.mean()*self._weights), 5)
port_weekly_simple_ret: float = round(4.856 * port_daily_simple_ret, 5)
port_monthly_simple_ret: float = round(21 * port_daily_simple_ret, 5)
port_quarterly_simple_ret: float = round(63 * port_daily_simple_ret, 5)
port_yearly_simple_ret: float = round(252 * port_daily_simple_ret, 5)
print('port_daily_simple_ret', str(100*port_daily_simple_ret) + '%')
print('port_weekly_simple_ret', str(100*port_weekly_simple_ret) + '%')
print('port_monthly_simple_ret', str(100*port_monthly_simple_ret) + '%')
print('port_quarterly_simple_ret', str(100*port_quarterly_simple_ret) + '%')
print('port_yearly_simple_ret', str(100*port_yearly_simple_ret) + '%')
self._setPortfolioInfo()
self._optimizer = PortfolioOptimizer(self._legend_place, self._a_float, self._stats, self._basics.Data)
self._stock_market_index = SnP500Index('yahoo', "^GSPC", self._a_ts)
self._linear_reg = PortfolioLinearReg(self._stock_market_index, self._stats.Returns)
print(f'The portfolio beta is {self._linear_reg.Beta}, for each 1% of index portfolio will move {self._linear_reg.Beta}%')
print('The portfolio alpha is ', self._linear_reg.Alpha)
print('_', self._basics.DataLogReturns.head())
cov_mat_annual = self._basics.DataLogReturns.cov() * 252
print('-', cov_mat_annual)
def _getDataReturnsAverage(self, a_df: DataFrame = | DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 14:33:01 2018
@author: AyushRastogi
"""
# Extracting the cumulative 60, 90, 180, 365 and 730 day production for Oil, Gas and Water
import pandas as pd
import os
os.getcwd() # Get the default working directory
path = r'C:\Users\ayush\Desktop\Meetup2_All Files'
os.chdir(path)
# Reading the input file with cumulative oil/gas/water production and cum days
df = pd.read_csv(path+r'\Cumulative_Production_0619.csv')
# The process is repeated for Oil, Gas and Water
# --------------------------OIL Production------------------
# Creating the columns and filling them with 0
df['60_Interpol_OIL'] = 0
df['90_Interpol_OIL'] = 0
df['180_Interpol_OIL'] = 0
df['365_Interpol_OIL'] = 0
df['730_Interpol_OIL'] = 0
# For loop that runs through every row (up to the last but one). If the cum_days value we need (60/90/180/365/730) falls between two adjacent cell values, it uses linear
# interpolation to calculate the cumulative sum for that particular value
# y = y1 + ((y2-y1)*(x-x1)/(x2-x1)), where y = required production value, and x = 365 (Example)
for count in range(len(df['APINO'])-1): #loop running through the entire column
if (df['cum_days'][count] < 60 and df['cum_days'][count+1] > 60):
df['60_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(60 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 60): # if the required value is already present, simply copy it
df['60_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['60_Interpol_OIL'], errors='coerce') # Convert the column values to numbers
df['60_Interpol_OIL'] = df['60_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist() # Getting only 1 decimal place and adding values to a list
df[df['60_Interpol_OIL'] != '0.0'] # Getting rid of all the values which = 0.0
df['60_Interpol_OIL'].astype(float) # Convert the datatype to float (better since its a calculation)
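# The same interpolate-and-clean pattern repeats below for every horizon (60/90/180/365/730 days)
# and every phase (oil/gas/water). Purely as an illustrative sketch (it is not called by this
# script), the linear interpolation could be factored into one helper; note the sketch brackets
# the target day with rows `count` and `count + 1`, matching the if-condition used above.
def interpolate_cum(frame, value_col, day):
    # returns a Series with the interpolated cumulative value at `day` for the row whose
    # cum_days bracket straddles `day`, and 0.0 everywhere else
    out = pd.Series(0.0, index=frame.index)
    for count in range(len(frame) - 1):
        d0, d1 = frame['cum_days'][count], frame['cum_days'][count + 1]
        if d0 == day:
            out[count] = frame[value_col][count]
        elif d0 < day < d1:
            y0, y1 = frame[value_col][count], frame[value_col][count + 1]
            out[count] = y0 + (y1 - y0) * (day - d0) / (d1 - d0)
    return out.round(1)
# Hypothetical call: df['60_Interpol_OIL'] = interpolate_cum(df, 'cum_oil', 60)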
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 90 and df['cum_days'][count+1] > 90):
df['90_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(90 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 90):
df['90_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['90_Interpol_OIL'], errors='coerce')
df['90_Interpol_OIL'] = df['90_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['90_Interpol_OIL'] != '0.0']
df['90_Interpol_OIL'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 180 and df['cum_days'][count+1] > 180):
df['180_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(180 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 180):
df['180_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['180_Interpol_OIL'], errors='coerce')
df['180_Interpol_OIL'] = df['180_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['180_Interpol_OIL'] != '0.0']
df['180_Interpol_OIL'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 365 and df['cum_days'][count+1] > 365):
df['365_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(365 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 365):
df['365_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['365_Interpol_OIL'], errors='coerce')
df['365_Interpol_OIL'] = df['365_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['365_Interpol_OIL'] != '0.0']
df['365_Interpol_OIL'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 730 and df['cum_days'][count+1] > 730):
df['730_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(730 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 730):
df['730_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['730_Interpol_OIL'], errors='coerce')
df['730_Interpol_OIL'] = df['730_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['730_Interpol_OIL'] != '0.0']
df['730_Interpol_OIL'].astype(float)
# --------------------------GAS Production------------------
df['60_Interpol_GAS'] = 0
df['90_Interpol_GAS'] = 0
df['180_Interpol_GAS'] = 0
df['365_Interpol_GAS'] = 0
df['730_Interpol_GAS'] = 0
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 60 and df['cum_days'][count+1] > 60):
df['60_Interpol_GAS'][count] = df['cum_gas'][count-1] + ((df['cum_gas'][count+1]) - df['cum_gas'][count-1])*(60 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 60):
df['60_Interpol_GAS'][count] = df['cum_gas'][count]
pd.to_numeric(df['60_Interpol_GAS'], errors='coerce')
df['60_Interpol_GAS'] = df['60_Interpol_GAS'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['60_Interpol_GAS'] != '0.0']
df['60_Interpol_GAS'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 90 and df['cum_days'][count+1] > 90):
df['90_Interpol_GAS'][count] = df['cum_gas'][count-1] + ((df['cum_gas'][count+1]) - df['cum_gas'][count-1])*(90 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 90):
df['90_Interpol_GAS'][count] = df['cum_gas'][count]
pd.to_numeric(df['90_Interpol_GAS'], errors='coerce')
df['90_Interpol_GAS'] = df['90_Interpol_GAS'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['90_Interpol_GAS'] != '0.0']
df['90_Interpol_GAS'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 180 and df['cum_days'][count+1] > 180):
df['180_Interpol_GAS'][count] = df['cum_gas'][count-1] + ((df['cum_gas'][count+1]) - df['cum_gas'][count-1])*(180 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 180):
df['180_Interpol_GAS'][count] = df['cum_gas'][count]
pd.to_numeric(df['180_Interpol_GAS'], errors='coerce')
df['180_Interpol_GAS'] = df['180_Interpol_GAS'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['180_Interpol_GAS'] != '0.0']
df['180_Interpol_GAS'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 365 and df['cum_days'][count+1] > 365):
df['365_Interpol_GAS'][count] = df['cum_gas'][count-1] + ((df['cum_gas'][count+1]) - df['cum_gas'][count-1])*(365 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 365):
df['365_Interpol_GAS'][count] = df['cum_gas'][count]
pd.to_numeric(df['365_Interpol_GAS'], errors='coerce')
df['365_Interpol_GAS'] = df['365_Interpol_GAS'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['365_Interpol_GAS'] != '0.0']
df['365_Interpol_GAS'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 730 and df['cum_days'][count+1] > 730):
df['730_Interpol_GAS'][count] = df['cum_gas'][count-1] + ((df['cum_gas'][count+1]) - df['cum_gas'][count-1])*(730 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 730):
df['730_Interpol_GAS'][count] = df['cum_gas'][count]
pd.to_numeric(df['730_Interpol_GAS'], errors='coerce')
df['730_Interpol_GAS'] = df['730_Interpol_GAS'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['730_Interpol_GAS'] != '0.0']
df['730_Interpol_GAS'].astype(float)
# ---------------------------Water Production-------------------
df['60_Interpol_WATER'] = 0
df['90_Interpol_WATER'] = 0
df['180_Interpol_WATER'] = 0
df['365_Interpol_WATER'] = 0
df['730_Interpol_WATER'] = 0
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 60 and df['cum_days'][count+1] > 60):
df['60_Interpol_WATER'][count] = df['cum_water'][count-1] + ((df['cum_water'][count+1]) - df['cum_water'][count-1])*(60 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 60):
df['60_Interpol_WATER'][count] = df['cum_water'][count]
pd.to_numeric(df['60_Interpol_WATER'], errors='coerce')
df['60_Interpol_WATER'] = df['60_Interpol_WATER'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['60_Interpol_WATER'] != '0.0']
df['60_Interpol_WATER'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 90 and df['cum_days'][count+1] > 90):
df['90_Interpol_WATER'][count] = df['cum_water'][count-1] + ((df['cum_water'][count+1]) - df['cum_water'][count-1])*(90 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 90):
df['90_Interpol_WATER'][count] = df['cum_water'][count]
pd.to_numeric(df['90_Interpol_WATER'], errors='coerce')
df['90_Interpol_WATER'] = df['90_Interpol_WATER'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['90_Interpol_WATER'] != '0.0']
df['90_Interpol_WATER'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 180 and df['cum_days'][count+1] > 180):
df['180_Interpol_WATER'][count] = df['cum_water'][count-1] + ((df['cum_water'][count+1]) - df['cum_water'][count-1])*(180 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 180):
df['180_Interpol_WATER'][count] = df['cum_water'][count]
| pd.to_numeric(df['180_Interpol_WATER'], errors='coerce') | pandas.to_numeric |
"""
Written by <NAME>, 22-10-2018
This script contains functions for data formatting and accuracy assessment of keras models
"""
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import keras.backend as K
from math import sqrt
import numpy as np
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = | pd.concat(cols, axis=1) | pandas.concat |
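# Illustrative call (a sketch, not from the original script), assuming the function goes on to
# name the columns, optionally drop NaN rows and return the aggregated frame `agg`:
#
#   raw = np.random.rand(100, 2)                              # hypothetical (n_samples, n_vars) data
#   supervised = series_to_supervised(raw, n_in=3, n_out=1)
#   # columns: var1(t-3) ... var2(t-1) followed by var1(t), var2(t)
#   X, y = supervised.values[:, :-2], supervised.values[:, -2:]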
import numpy as np
import pylab as pl
from itertools import product
from lib_predict_io import find_matching_trials, load_experiment_data, load_simulation_data
from motionstruct.functions import dist_mod2pi
def score_sep(vb, vn):
"""We combine var_bias and Sig_noise in a score,
ranging from 0 (only bias) to 1 (only noise)
"""
vn = vn.diagonal()
vb = np.array(vb) if np.ndim(vb) == 1 else np.array(vb).diagonal()
return vn / (vb + vn)
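# Worked example (illustrative only): with bias variances vb = [1.0, 3.0] and a noise covariance
# whose diagonal is [1.0, 1.0], score_sep returns [1/(1+1), 1/(3+1)] = [0.5, 0.25]; values near 1
# mean the error is dominated by noise, values near 0 mean it is dominated by bias.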
# # # PARAMS
from DSLs_predict_MarApr2019 import experiment_label, conditions, subjects, DSLs
score = score_sep
path_exp = "./data/paper/"
path_sim = "./data/sim/"
conditions = list(conditions)
outfname_data = "bias_variance_analysis_%s.pkl.zip" %experiment_label
# # # END OF PARAMS
def df_empty(columns, dtypes, index=None):
import pandas as pd
assert len(columns)==len(dtypes)
df = pd.DataFrame(index=index)
for c,d in zip(columns, dtypes):
df[c] = | pd.Series(dtype=d) | pandas.Series |
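# Illustrative usage (a sketch, not part of the original helpers), assuming the function returns
# `df` once every column has been typed:
#
#   df = df_empty(['trial', 'error'], dtypes=[int, float])
#   # len(df) == 0 and df.dtypes -> trial: int64, error: float64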
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 16:30:38 2019
input/output operation.
@author: zoharslong
"""
from base64 import b64encode, b64decode
from numpy import ndarray as typ_np_ndarray
from pandas.core.series import Series as typ_pd_Series  # Series type
from pandas.core.frame import DataFrame as typ_pd_DataFrame  # DataFrame type
from pandas.core.indexes.base import Index as typ_pd_Index  # DataFrame.columns type
from pandas.core.indexes.range import RangeIndex as typ_pd_RangeIndex  # DataFrame.index type
from pandas.core.groupby.generic import DataFrameGroupBy as typ_pd_DataFrameGroupBy  # DataFrame.groupby type
from pandas import DataFrame as pd_DataFrame, read_csv, read_excel, concat, ExcelWriter
from time import sleep
from datetime import timedelta as typ_dt_timedelta
from os import listdir, makedirs
from tempfile import gettempdir  # used to locate the local fake_useragent temp cache
from os.path import join as os_join, exists as os_exists
from openpyxl import load_workbook  # keep existing sheets when writing to an Excel file
from fake_useragent import UserAgent, VERSION as fku_version # FakeUserAgentError,
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from pymysql import connect, IntegrityError
from urllib3.util.retry import MaxRetryError
from urllib3.response import ProtocolError
from urllib3.connection import NewConnectionError
from requests.models import ChunkedEncodingError
from requests.adapters import ProxyError
from requests import post, get, TooManyRedirects, ReadTimeout
from re import findall as re_find, sub as re_sub
from random import randint
from json import loads, JSONDecodeError
from pyzohar.sub_slt_bsc.bsz import stz, lsz, dcz, dtz
# from socket import getfqdn, gethostname  # get the local IP  # from telnetlib import Telnet  # a second way to check proxy IP validity
class ioBsc(pd_DataFrame):
"""
I/O basic
ioBsc.lcn in {
'fld','fls',
'mng','mdb','cln',
'sql','sdb','tbl',
'url'/'url_lst'/'url_ctt','url_htp',
'hdr','pst','prm',
'prx',‘prx_tms’,
'ppc':{
'key': [],
'ndx': [],
},
}
"""
lst_typ_dts = [
str,
stz,
list,
lsz,
dict,
dcz,
tuple,
bytes,
typ_np_ndarray,
typ_pd_DataFrame,
typ_pd_Series,
typ_pd_Index,
typ_pd_RangeIndex,
typ_pd_DataFrameGroupBy,
type(None)
] # data sets' type
lst_typ_lcn = [list, lsz, dict, dcz, type(None)] # io methods' type
def __init__(self, dts=None, lcn=None, *, spr=False):
        # all the i/o operations have the same attributes for locating target data: location and collection
        super().__init__()  # do not pass dts into the DataFrame constructor up front
        self.__dts, self._dts, self.typ = None, None, None  # holds the incoming data
        self.len, self.clm, self.hdr, self.tal = None, None, None, None
        self.kys, self.vls = None, None
        self.__lcn, self.iot = None, None  # connection info
self._mySql, self._mySdb, self._myTbl = None, None, None
self._myMng, self._myMdb, self._myCln = None, None, None
self.__init_rst(dts, lcn, spr=spr)
def __init_rst(self, dts=None, lcn=None, *, spr=False):
"""
private reset initiation.
:param dts: a data set to input or output
:return: None
"""
try:
self.dts = dts.copy() if self.dts is None and dts is not None else []
except AttributeError:
self.dts = dts if self.dts is None and dts is not None else []
self.lcn = lcn.copy() if self.lcn is None and lcn is not None else {}
if spr:
self.spr_nit()
def spr_nit(self, rtn=False):
"""
super initiation.
:param rtn: default False
:return:
"""
try:
super(ioBsc, self).__init__(self.__dts)
except ValueError:
print('info: %s cannot convert to DataFrame.' % (str(self.__dts)[:8]+'..'))
if rtn:
return self
def __str__(self):
"""
print(io path).
:return: None
"""
dct_prn = {i: self.lcn[i] for i in self.lcn.keys() if i in ['fls', 'cln', 'tbl', 'url']}
return '<io: %s; ds: %s>' % (str(dct_prn), self.typ)
    __repr__ = __str__  # repr() of the instance gives the same output as print(className)
@property
def dts(self):
"""
        @property getter for the data set.
        :return: self.__dts
"""
return self.__dts
@dts.setter
def dts(self, dts):
"""
self.dts = dts
:param dts: a dataset to import.
:return: None
"""
if dts is None or type(dts) in self.lst_typ_dts:
try:
self.__dts = dts.copy() if dts is not None else dts
except AttributeError:
self.__dts = dts
self.__attr_rst('dts')
else:
raise TypeError('info: dts\'s type %s is not available.' % type(dts))
def set_dts(self, dts, *, ndx_rst=True, ndx_lvl=None):
"""
if do not reset index after set data set, use self.set_dts() instead of self.dts
:param dts: data set to fill self.dts
:param ndx_rst: if reset data set's index or not, default True
:param ndx_lvl: DataFrame.reset_index(level=prm), default None
:return: None
"""
if dts is None or type(dts) in self.lst_typ_dts:
try:
self.__dts = dts.copy() if dts is not None else dts
except AttributeError:
self.__dts = dts
self.__attr_rst('dts', ndx_rst=ndx_rst, ndx_lvl=ndx_lvl)
else:
raise TypeError('info: dts\'s type %s is not available.' % type(dts))
@property
def lcn(self):
"""
self.location.
:return: self.__lcn
"""
return self.__lcn
@lcn.setter
def lcn(self, lcn):
"""
set self.__lcn in self.lcn.
:param lcn: a dict of params for self
:return: None
"""
if type(lcn) in self.lst_typ_lcn:
            if self.__lcn is None:  # when self.__lcn is empty, assign lcn to it directly
                self.__lcn = lcn
            elif type(lcn) in [dict]:  # when self.__lcn already holds values, update it with lcn
                self.__lcn.update(lcn)  # update() is used here, which requires self.__lcn to be a dict
self.__attr_rst('lcn')
else:
raise TypeError('info: lcn\'s type %s is not available.' % type(lcn))
def mng_nit(self):
"""
if self.io type in mongodb, reset mongo attributes _myMng, _mySdb, _myCln.
:return: None
"""
if 'mng' not in self.lcn.keys():
self.lcn['mng'] = None
self.lcn['mng'] = "mongodb://localhost:27017" if not self.lcn['mng'] else self.lcn['mng']
self._myMng = MongoClient(host=self.lcn['mng'])
self._myMdb = self._myMng[self.lcn['mdb']] if [True if 'mdb' in self.lcn.keys() else False] else None
self._myCln = self._myMdb[self.lcn['cln']] if [True if 'cln' in self.lcn.keys() else False] else None
def sql_nit(self):
"""
SQL initiate. needs self.lcn={'sql'={'hst','prt','usr','psw'},'sdb','tbl'}
:return: None
"""
if 'sql' not in self.lcn.keys():
self.lcn['sql'] = None
self.lcn['sql'] = {'hst': '172.16.0.13', 'prt': 3306, 'usr': None, 'psw': None} if \
not self.lcn['sql'] else self.lcn['sql']
self._mySql = self.lcn['sql'] if [True if 'sql' in self.lcn.keys() else False] else None
self._mySdb = self.lcn['sdb'] if [True if 'sdb' in self.lcn.keys() else False] else None
self._myTbl = self.lcn['tbl'] if [True if 'tbl' in self.lcn.keys() else False] else None
def api_nit(self):
"""
API initiate. needs self.lcn={'url'/'url_lst'/'url_ctt','pst','hdr','prx','prm'}
:return:
"""
        # check whether the local fakeUserAgent cache file exists; create it automatically if not
if 'fake_useragent_' + fku_version + '.json' not in listdir(gettempdir()):
fku = get('https://fake-useragent.herokuapp.com/browsers/' + fku_version, timeout=180)
with open(os_join(gettempdir(), 'fake_useragent_' + fku_version + '.json'), "w") as wrt:
wrt.write(fku.text)
if 'pst' not in self.lcn.keys():
            self.lcn['pst'] = None  # parameter data sent in the request body of a POST request
if 'hdr' not in self.lcn.keys():
            self.lcn['hdr'] = {'User-Agent': UserAgent(use_cache_server=False).random}  # if no request header was given, improvise a random one on the spot
else:
            self.lcn['hdr'].update({'User-Agent': UserAgent(use_cache_server=False).random})  # if a header was given, refresh the fake User-Agent once
if 'prx' not in self.lcn.keys():
            self.lcn['prx'] = None  # whether to use a proxy
if 'prm' not in self.lcn.keys():
            self.lcn['prm'] = None  # parameters appended to the url in a GET request
def dts_nit(self, ndx_rst=True, ndx_lvl=None):
"""
dataset initiate, generate attributes typ, len, kys, vls, clm, hdr, tal and if reset index or not.
:param ndx_rst: if reset index or not, default True
:param ndx_lvl: if reset index, set the level of index
:return: None
"""
lst_xcp = []
try:
self.typ = type(self.__dts)
except TypeError:
lst_xcp.append('type')
try:
self.len = self.dts.__len__()
except AttributeError:
lst_xcp.append('len')
try:
self.kys = self.dts.keys()
except (AttributeError, TypeError):
lst_xcp.append('keys')
try:
self.vls = self.dts.values()
except (AttributeError, TypeError):
lst_xcp.append('values')
if self.typ in [typ_pd_DataFrame]:
self.clm = self.dts.columns
self.hdr = self.dts.head()
self.tal = self.dts.tail()
self.hdr = self.dts[:5] if self.typ in [list] else self.hdr
try:
if ndx_rst:
self.dts.reset_index(drop=True, inplace=True, level=ndx_lvl)
except AttributeError:
lst_xcp.append('resetIndex')
        if lst_xcp:
            print('info: %s is not available for %s.' % (str(lst_xcp), str(self.__dts)[:8] + '..'))
def lcn_nit(self, prn=False):
"""
location initiate, let self.iot in ['lcl','mng','sql','api'] for [local, mongodb, sql, api].
:return: None
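        Illustrative lcn examples and the io types they map to (key names follow
        the checks below; the concrete values here are placeholders only):
        {'fld': 'dst/samples', 'fls': ['smp01.xlsx']} -> iot ['lcl']
        {'sdb': 'aDb', 'tbl': 'aTbl'}                 -> iot ['sql']
        {'mdb': 'aDb', 'cln': 'aCln'}                 -> iot ['mng']
        {'url': 'http://...'}                         -> iot ['api']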
"""
self.iot = []
if [True for i in self.lcn.keys() if i in ['fld']] == [True]:
self.iot.append('lcl')
if [True for i in self.lcn.keys() if i in ['sdb']] == [True]:
self.iot.append('sql')
if [True for i in self.lcn.keys() if i in ['mdb']] == [True]:
self.iot.append('mng')
if set([True for i in self.lcn.keys() if re_find('url', i)]) in [{True}]:
self.iot.append('api')
if not self.iot and prn:
print(' info: <.lcn: %s> is not available.' % self.lcn)
def __attr_rst(self, typ=None, *, ndx_rst=True, ndx_lvl=None):
"""
reset attributes lsz.typ.
:param typ: type of attributes resets, in ['dts','lcn'] for data set reset and location reset
:return: None
"""
if typ in ['dts', None]:
self.dts_nit(ndx_rst, ndx_lvl)
if typ in ['lcn', None]:
self.lcn_nit()
if [True for i in self.iot if i in ['mng', 'mnz']]: # for special cases, reset some attributes
self.mng_nit()
if [True for i in self.iot if i in ['sql', 'sqz']]:
self.sql_nit()
if [True for i in self.iot if i in ['api', 'apz']]:
self.api_nit()
def typ_to_dtf(self, clm=None, *, spr=False, rtn=False):
"""
self.dts's type from others to dataFrame.
:param clm: define the columns' name in the final dataFrame
:param spr: super or not, default False
:param rtn: return or not, default False
:return: None if not rtn
"""
if self.typ in [typ_pd_DataFrame]:
pass
elif self.len == 0 or self.dts in [None, [], [{}]]:
self.dts = pd_DataFrame()
elif self.typ in [dict, dcz]:
self.dts = pd_DataFrame([self.dts])
elif self.typ in [list, lsz, typ_np_ndarray]:
self.dts = pd_DataFrame(self.dts, columns=clm)
elif self.typ in [typ_pd_Series]:
self.dts = pd_DataFrame(self.dts)
# from sas7bdat import SAS7BDAT as typ_sas7bdat
# elif self.typ in [typ_sas7bdat]: # https://pypi.org/project/sas7bdat/
# self.dts = self.dts.to_data_frame()
else:
raise AttributeError('type of dts is not available')
if spr:
self.spr_nit()
if rtn:
return self.dts
def dtf_to_typ(self, typ='list', *, rtn=False, prm='records'):
"""
alter dataFrame to other type.
:param typ: alter type dataFrame to this type, default 'list'
:param rtn: return the result or not, default False
:param prm: to_dict(orient=''), default 'records' in ['records','dict','list','series','split','index']
:return: if rtn is True, return the result
"""
if typ.lower() in ['list', 'lst', 'lsz'] and self.typ in [typ_pd_DataFrame]:
self.dts = self.dts.to_dict(orient=prm)
else:
raise AttributeError('stop: type is not available')
if rtn:
return self.dts
class lclMixin(ioBsc):
"""
local files input and output operations.
    lcn format in {'fld':'', 'fls':['','',...], 'mng':None, 'mdb':''}.
    >>> lclMixin(lcn={'fld':'dst/samples','fls':['smp01.xlsx']}).lcl_mpt(rtn=True) # import files from the given folder into RAM
A B C
0 a 1 e
1 b 2 f
2 c 3 g
"""
def __init__(self, dts=None, lcn=None, *, spr=False):
super(lclMixin, self).__init__(dts, lcn, spr=spr)
def mpt_csv(self, fld=None, fls=None, sep=None, *, spr=False, rtn=False):
"""
        import csv from a folder into RAM.
        :param fld: target folder, default None to use self.lcn['fld']
        :param fls: target file, default None to use self.lcn['fls']
        :param sep: column separator, default ','
        :param spr: super or not, default False
        :param rtn: return the result or not, default False
        :return: self.dts if rtn is True, else None
"""
fld = self.lcn['fld'] if fld is None else fld
fls = self.lcn['fls'] if fls is None else fls
fls = fls[0] if type(fls) in [list] else fls
sep = ',' if sep is None else sep
self.dts = read_csv(os_join(fld, fls), sep=sep)
if spr:
self.spr_nit()
if rtn:
return self.dts
def mpt_xcl(self, fld=None, fls=None, hdr=None, sht=None, *, spr=False, rtn=False):
"""
        import excel data from a folder into RAM
        :param fld: target folder, default None to use self.lcn['fld']
        :param fls: target file, default None to use self.lcn['fls']
        :param hdr: row to use as the column names, default 0
        :param sht: sheet name or index to read, default 0
        :param spr: super or not, default False
        :param rtn: return the result or not, default False
        :return: self.dts if rtn is True, else None
"""
fld = self.lcn['fld'] if fld is None else fld
fls = self.lcn['fls'] if fls is None else fls
fls = fls[0] if type(fls) in [list] else fls
hdr = 0 if hdr is None else hdr
sht = 0 if sht is None else sht
self.dts = read_excel(os_join(fld, fls), header=hdr, sheet_name=sht)
if spr:
self.spr_nit()
if rtn:
return self.dts
def mpt_txt(self, fld=None, fls=None, *, spr=False, rtn=False):
"""
        import txt from a folder into RAM
        :param fld: target folder
        :param fls: target file
        :param spr: super or not, default False
        :param rtn: return the result or not, default False
        :return: self.dts if rtn is True, else None
"""
fld = self.lcn['fld'] if fld is None else fld
fls = self.lcn['fls'] if fls is None else fls
fls = fls[0] if type(fls) in [list] else fls
with open(os_join(fld, fls), mode='r', encoding='utf-8') as act:
            self.dts = act.readlines()  # readlines() returns a list
if spr:
self.spr_nit()
if rtn:
return self.dts
def mpt_img(self, fld=None, fls=None, *, spr=False, rtn=False):
"""
        import image from disk
        :param fld: target folder
        :param fls: target file
        :param spr: super or not, default False
        :param rtn: return the result or not, default False
:return: if rtn, return a string in type base64
"""
fld = self.lcn['fld'] if fld is None else fld
fls = self.lcn['fls'] if fls is None else fls
fls = fls[0] if type(fls) in [list] else fls
with open(os_join(fld, fls), mode='rb') as act:
self.dts = b64encode(act.read())
if spr:
self.spr_nit()
if rtn:
return self.dts
def lcl_mpt(self, *, sep=None, hdr=None, sht=None, spr=False, rtn=False):
"""
        local files importation.
        :param sep: csv separator, passed to self.mpt_csv
        :param hdr: header row, passed to self.mpt_xcl
        :param sht: sheet name/index, passed to self.mpt_xcl
        :param spr: super or not, default False
        :param rtn: return the result or not, default False
        :return: self.dts if rtn is True, else None
        """
        if type(self.lcn['fls']) is str:    # unify the case where 'fls' names one file or a list of files
self.lcn = {'fls': [self.lcn['fls']]}
if type(self.dts) in [typ_pd_DataFrame] and self.len == 0:
dtf_mrg = | pd_DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from numpy.random import randint
import os
import netCDF4
import time
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tensorflow.keras.optimizers import Adam
import logging
logger = logging.getLogger(__name__)
def _valid_times(dataset, variable):
'''Search dataset for time axis'''
var = dataset.variables[variable]
for d in var.dimensions:
if d.startswith('time'):
if d in dataset.variables:
tvar = dataset.variables[d]
return np.array(
netCDF4.num2date(tvar[:], units=tvar.units),
dtype='datetime64[s]')
coords = var.coordinates.split()
for c in coords:
if c.startswith('time'):
tvar = dataset.variables[c]
return np.array(
netCDF4.num2date(tvar[:], units=tvar.units),
dtype='datetime64[s]')
def load_netcdf(dirs, ext, variable, channels, seasons, era = False):
files = []
names = []
dirs = sorted(set(dirs))
# For each dir in dirs
for d in dirs:
logger.info("Processing directory: %s " % d)
count = 0
sorted_listdir = sorted(os.listdir(d))
for idx, f in enumerate(sorted_listdir):
# if count > 3:
# break
            '''Check that the file has the expected extension and that its month code is in seasons'''
if f.endswith(ext) and (f.split('.')[0][-2:] in seasons):
nc = netCDF4.Dataset(os.path.join(d, f))
shp = nc.variables[variable].shape
img = np.array(nc.variables[variable]).reshape(shp[0], shp[1], shp[2], channels)
# TODO: ask for details. Why this value is calculated?
'''if era:
offset = nc.variables[variable].add_offset
scaling_factor = nc.variables[variable].scale_factor
packed_array = (np.array(nc.variables[variable]) - offset)/scaling_factor
if -32767 in packed_array:
print('Missing value detected in LR dataset')
else:
img = np.flip(img, axis = 1)'''
if not count:
files = img
else:
files = np.r_[files, img]
count += 1
# Retrieve names for each slice of the array
names.extend(_valid_times(nc, variable).tolist())
nc.close()
logger.info("Loaded %s images count: %d" % (ext, count))
return files, names
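# Illustrative usage sketch for load_path/load_netcdf (hypothetical paths and
# variable name; the real values depend on the dataset being processed):
# dirs = load_path('/data/lr_fields')
# imgs, names = load_netcdf(dirs, ext='.nc', variable='t2m', channels=1,
#                           seasons=['01', '02', '12'], era=True)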
def load_path(path):
directories = []
if os.path.isdir(path):
directories.append(path)
dirs = sorted(os.listdir(path))
for elem in dirs:
if os.path.isdir(os.path.join(path, elem)):
directories = directories + load_path(os.path.join(path, elem))
directories.append(os.path.join(path, elem))
return directories
def normalize_img(data, lower, upper, glob_min, glob_max):
return ((upper-lower)*((data.astype(np.float32) - glob_min)/(glob_max - glob_min)) + lower)
def denormalize_img(data, lower, upper, glob_min, glob_max):
return (((data.astype(np.float32) - lower)/(upper - lower))*(glob_max - glob_min) + glob_min)
def get_optimizer():
adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
return adam
# While training save generated image(in form LR, SR, HR)
# Save only one image as sample
def plot_generated_images(output_dir, epoch, generator, x_test_hr, x_test_lr, dim=(1, 3), figsize=(15, 5)):
examples = x_test_hr.shape[0]
logger.info("Plot generated images: %s" % str(examples))
value = randint(0, examples)
image_batch_hr = denormalize(x_test_hr)
image_batch_lr = x_test_lr
gen_img = generator.predict(image_batch_lr)
generated_image = denormalize(gen_img)
image_batch_lr = denormalize(image_batch_lr)
plt.figure(figsize=figsize)
plt.subplot(dim[0], dim[1], 1)
plt.imshow(image_batch_lr[value], interpolation='nearest')
plt.axis('off')
plt.subplot(dim[0], dim[1], 2)
plt.imshow(generated_image[value], interpolation='nearest')
plt.axis('off')
plt.subplot(dim[0], dim[1], 3)
plt.imshow(image_batch_hr[value], interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig(output_dir + 'generated_image_%d.png' % epoch)
# ***
# While training save generated image(in form LR, SR, tp1)
# Save only one image as sample
def best_plot(generator, files_hr_pad_test, files_lr_2x2_test, glob_min, glob_max, channels, imgs_save_dir):
examples = files_hr_pad_test.shape[0]
time_cnt = 0.0
psnr = 0.0
mse = 0.0
ssim = 0.0
fid = 0.0
month_days_cnt = 0
cur_month = 0
image_batch_tp1_tmp = np.copy(files_hr_pad_test[0])
image_batch_tp1_tmp[:,:,0] = 0
generated_image_tmp = np.copy(files_hr_pad_test[0])
generated_image_tmp[:,:,0] = 0
rmse_image_tmp = np.copy(files_hr_pad_test[0])
rmse_image_tmp[:,:,0] = 0
covariance_tmp = np.copy(files_hr_pad_test[0])
covariance_tmp[:,:,0] = 0
corrcoef_tmp = np.copy(files_hr_pad_test[0])
corrcoef_tmp[:,:,0] = 0
spearman_tmp = np.copy(files_hr_pad_test[0])
spearman_tmp[:,:,0] = 0
pvalue_tmp = np.copy(files_hr_pad_test[0])
pvalue_tmp[:,:,0] = 0
corr_tmp = np.copy(files_hr_pad_test[0])
corr_tmp[:,:,0] = 0
image_batch_tp1_list = [ ]
generated_image_list = [ ]
training_set_size = int(len(files_names_lr) - len(files_lr_80x160_test))
examples = files_hr_pad_test.shape[0]
for sample in range(examples):
image_batch_t_resized = np.copy(files_lr_2x2_test[sample])
image_batch_tp1 = np.copy(files_hr_pad_test[sample])
for c in range(channels):
image_batch_tp1[:, :, c] = denormalize_img(image_batch_tp1[:, :, c], lower_hr, upper_hr, glob_min[1][c], glob_max[1][c])
image_batch_tp1_tmp[:,:,c] += image_batch_tp1[:,:,c]
image_batch_tp1_list.append(image_batch_tp1)
month_days_cnt += 1
start_time = time.time( )
generated_image = generator.predict(image_batch_t_resized.reshape(1, image_batch_t_resized.shape[0], image_batch_t_resized.shape[1], image_batch_t_resized.shape[2]))
end_time = time.time()
time_cnt += (end_time - start_time)
generated_image_450x450 = generated_image[0]
#print("GENERATED SHAPE : {}".format(generated_image_450x450.shape))
#check_scales = ((e == 1) or (e == 10) or (e == 50) or ((e % scales_checkpoint_epoch) == 0))
generated_image_450x450[0, :, :, c] = denormalize_img(generated_image[0][0, :, :, c], lower_hr, upper_hr, glob_min[1][c], glob_max[1][c])
gen_image_dataframe = None
if(outlier_removal):
# OUTLIER REMOVAL procedure
###########################
gen_image_dataframe = pd.DataFrame(generated_image_450x450[0, :, :, c])
outlierConstant = 1
upper_quartile = np.percentile(gen_image_dataframe, 75)
lower_quartile = np.percentile(gen_image_dataframe, 25)
IQR = (upper_quartile - lower_quartile) * outlierConstant
quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
gen_image_dataframe[(gen_image_dataframe < quartileSet[0])] = lower_quartile - IQR
                gen_image_dataframe[(gen_image_dataframe > quartileSet[1])] = upper_quartile + IQR
generated_image_450x450[0,:,:,c] = gen_image_dataframe.values
###########################
generated_image_tmp[:,:,c] += generated_image_450x450[0,:,:,c]
generated_image_list.append(generated_image_450x450[0,:,:,c])
rmse_image_tmp[:,:,c] += ((image_batch_tp1[:,:,c] - generated_image_450x450[0,:,:,c])**2)
cur_step = year_mask_test[cur_month]*4
if(month_days_cnt == cur_step):
for c in range(channels):
for i in range(image_batch_tp1.shape[0]):
for j in range(image_batch_tp1.shape[1]):
dataset_real = [image_batch_tp1_list[k][i][j].item() for k in range(cur_step)]
dataset_gen = [generated_image_list[k][i][j] for k in range(cur_step)]
temp_dataset = np.array((dataset_real, dataset_gen))
covariance_tmp[i,j,c] = np.cov(temp_dataset)[0,1]
corrcoef_tmp[i,j,c] = np.corrcoef(temp_dataset)[0,1]
spearman = stats.spearmanr(temp_dataset, axis=1)
spearman_tmp[i,j,c] = spearman[0]
pvalue_tmp[i,j,c] = spearman[1]
corr_tmp[i,j,c] = np.correlate(dataset_real, dataset_gen)
pd.DataFrame(image_batch_tp1_tmp[24:-25, 7:-6,c] / cur_step).to_csv("{}/real_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
pd.DataFrame(generated_image_tmp[24:-25, 7:-6,c] / cur_step).to_csv("{}/generated_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
pd.DataFrame((image_batch_tp1_tmp[24:-25, 7:-6,c]-generated_image_tmp[24:-25, 7:-6,c]) / cur_step).to_csv("{}/diff_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
pd.DataFrame(np.abs(image_batch_tp1_tmp[24:-25, 7:-6,c]-generated_image_tmp[24:-25, 7:-6,c]) / cur_step).to_csv("{}/abs_diff_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
pd.DataFrame(np.sqrt(rmse_image_tmp[24:-25, 7:-6,c]/cur_step)).to_csv("{}/rmse_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
pd.DataFrame(covariance_tmp[24:-25, 7:-6,c]).to_csv("{}/covariance_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
pd.DataFrame(corrcoef_tmp[24:-25, 7:-6,c]).to_csv("{}/corrcoef_image_{}_T2M.csv".format(imgs_save_dir, months_years[cur_month]), header = False, index = False)
| pd.DataFrame(spearman_tmp[24:-25, 7:-6,c]) | pandas.DataFrame |
import json
import os
import glob
import random
from typing import Union
try:
import xarray as xr
except ModuleNotFoundError:
xr = None
import numpy as np
import pandas as pd
from .datasets import Datasets
from .utils import check_attributes, download, sanity_check
from ai4water.utils.utils import dateandtime_now
try:  # shapely may not be installed, as it may be difficult to install and is only needed for plotting data.
from ai4water.pre_processing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
raise ValueError(f"Dwonlaoad the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
This class first downloads the CAMELS dataset if it is not already downloaded.
Then the selected attribute for a selected id are fetched and provided to the
user using the method `fetch`.
Attributes
-----------
- ds_dir str/path: diretory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which kinds of static attributes
are present in this category.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
- fetch : fetches all attributes (both static and dynamic type) of all
        station/gauge_ids or a specified station. It can also be used to
        fetch all attributes of a number of station ids either by providing
        their gauge_id or by just saying that we need data of 20 stations
which will then be chosen randomly.
- fetch_dynamic_features :
        fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
raise NotImplementedError
def fetch_static_features(self, station, features):
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self)->list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
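        """If as_ts is True, repeat the static features over a date range from
        st to en (frequency freq) so they align with the dynamic time series;
        otherwise return them unchanged."""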
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will under
datasets directory"""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe:bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
stations : if string, it is supposed to be a station name/gauge_id.
If list, it will be a list of station/gauge_ids. If int, it will
be supposed that the user want data for this number of
stations/gauge_ids. If None (default), then attributes of all
available stations. If float, it will be supposed that the user
wants data of this fraction of stations.
dynamic_features : If not None, then it is the attributes to be
fetched. If None, then all available attributes are fetched
static_features : list of static attributes to be fetches. None
means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are obtained then it returns a
dictionary whose keys are station/gauge_ids and values are the
attributes and dataframes.
Otherwise either dynamic or static features are returned.
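        Example (illustrative sketch; requires a concrete subclass such as
        CAMELS_US and assumes its data has already been downloaded):
            >>> dataset = CAMELS_US()                       # doctest: +SKIP
            >>> dyn = dataset.fetch(stations=0.1,           # 10% of stations, chosen randomly
            ...                     dynamic_features='all',
            ...                     as_dataframe=True)      # doctest: +SKIP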
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
raise ModuleNotFoundError("modeule xarray must be installed to use `datasets` module")
return self.fetch_stations_attributes(stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs)
def _maybe_to_netcdf(self, fname:str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe:bool = False,
**kwargs):
"""Reads attributes of more than one stations.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
as_dataframe : whether to return the data as pandas dataframe. default
is xr.dataset object
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
are by default returned as xr.Dataset unless `as_dataframe` is True, in
such a case, it is a pandas dataframe with multiindex. If xr.Dataset,
it consists of `data_vars` equal to number of stations and for each
station, the `DataArray` is of dimensions (time, dynamic_features).
where `time` is defined by `st` and `en` i.e length of `DataArray`.
In case, when the returned object is pandas DataFrame, the first index
            is `time` and the second index is `dynamic_features`. Static attributes
            are always returned as a pandas DataFrame and have the following shape
            `(stations, static_features)`. If `dynamic_features` is None,
then they are not returned and the returned value only consists of
static features. Same holds true for `static_features`.
If both are not None, then the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
dyn = xr.load_dataset(self.dyn_fname) # daataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError
return stns
def fetch_dynamic_features(self,
stn_id,
attributes='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches all or selected dynamic attributes of one station."""
assert isinstance(stn_id, str)
station = [stn_id]
return self.fetch_stations_attributes(station,
attributes,
None,
st=st,
en=en,
as_dataframe=as_dataframe)
def fetch_station_attributes(self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs) -> pd.DataFrame:
"""
Fetches attributes for one station.
Arguments:
station : station id/gauge id for which the data is to be fetched.
dynamic_features
static_features
as_ts : whether static attributes are to be converted into a time
series or not. If yes then the returned time series will be of
                same length as that of dynamic attributes.
st : starting point from which the data to be fetched. By default
the data will be fetched from where it is available.
            en : end point of data to be fetched. By default the data will be fetched till the last available date.
Return:
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
"""
st, en = self._check_length(st, en)
station_df = pd.DataFrame()
if dynamic_features:
dynamic = self.fetch_dynamic_features(station, dynamic_features, st=st,
en=en, **kwargs)
station_df = pd.concat([station_df, dynamic])
if static_features is not None:
static = self.fetch_static_features(station, static_features)
if as_ts:
station_df = pd.concat([station_df, static], axis=1)
else:
station_df ={'dynamic': station_df, 'static': static}
elif static_features is not None:
station_df = self.fetch_static_features(station, static_features)
return station_df
class LamaH(Camels):
"""
Large-Sample Data for Hydrology and Environmental Sciences for Central Europe
from url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
paper: https://essd.copernicus.org/preprints/essd-2021-72/
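    Example (illustrative; the dataset is downloaded on first use):
        >>> ds = LamaH(time_step='daily', data_type='total_upstrm')   # doctest: +SKIP
        >>> dyn = ds.fetch(stations=1, as_dataframe=True)             # doctest: +SKIP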
"""
url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
_data_types = ['total_upstrm', 'diff_upstrm_all', 'diff_upstrm_lowimp'
]
time_steps = ['daily', 'hourly'
]
static_attribute_categories = ['']
def __init__(self, *,
time_step: str,
data_type: str,
**kwargs
):
"""
Arguments:
time_step : possible values are `daily` or `hourly`
data_type : possible values are `total_upstrm`, `diff_upstrm_all`
or 'diff_upstrm_lowimp'
"""
assert time_step in self.time_steps, f"invalid time_step {time_step} given"
assert data_type in self._data_types, f"invalid data_type {data_type} given."
self.time_step = time_step
self.data_type = data_type
super().__init__(**kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'lamah_diff_upstrm_lowimp_hourly_dyn.nc')
_data_types = self._data_types if self.time_step == 'daily' else ['total_upstrm']
if not os.path.exists(fpath):
for dt in _data_types:
for ts in self.time_steps:
self.time_step = ts
self.data_type = dt
fname = f"lamah_{dt}_{ts}_dyn"
self._maybe_to_netcdf(fname)
self.time_step = time_step
self.data_type = data_type
self.dyn_fname = os.path.join(self.ds_dir, f'lamah_{data_type}_{time_step}_dyn.nc')
@property
def dynamic_features(self):
station = self.stations()[0]
df = self.read_ts_of_station(station)
return df.columns.to_list()
@property
def static_features(self) -> list:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
return df.columns.to_list()
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def data_type_dir(self):
directory = 'CAMELS_AT'
if self.time_step == 'hourly':
directory = 'CAMELS_AT1' # todo, use it only for hourly, daily is causing errors
# self.ds_dir/CAMELS_AT/data_type_dir
f = [f for f in os.listdir(os.path.join(self.ds_dir, directory)) if self.data_type in f][0]
return os.path.join(self.ds_dir, f'{directory}{SEP}{f}')
def stations(self)->list:
# assuming file_names of the format ID_{stn_id}.csv
_dirs = os.listdir(os.path.join(self.data_type_dir, f'2_timeseries{SEP}{self.time_step}'))
s = [f.split('_')[1].split('.csv')[0] for f in _dirs]
return s
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
"""Reads attributes of one station"""
stations_attributes = {}
for station in stations:
station_df = pd.DataFrame()
if dynamic_features is not None:
dynamic_df = self.read_ts_of_station(station)
station_df = pd.concat([station_df, dynamic_df])
stations_attributes[station] = station_df
return stations_attributes
def fetch_static_features(self,
station:Union[str, list],
features=None
)->pd.DataFrame:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
#if features is not None:
static_features = check_attributes(features, self.static_features)
df = df[static_features]
if isinstance(station, list):
stations = [str(i) for i in station]
elif isinstance(station, int):
stations = str(station)
else:
stations = station
df.index = df.index.astype(str)
df = df.loc[stations]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
def read_ts_of_station(self, station) -> pd.DataFrame:
# read a file containing timeseries data for one station
fname = os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}{SEP}ID_{station}.csv')
df = pd.read_csv(fname, sep=';')
if self.time_step == 'daily':
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], freq="D")
df.index = periods.to_timestamp()
else:
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], hour=df["hh"], minute=df["mm"], freq="H")
df.index = periods.to_timestamp()
# remove the cols specifying index
[df.pop(item) for item in ['YYYY', 'MM', 'DD', 'hh', 'mm'] if item in df]
return df
@property
def start(self):
return "19810101"
@property
def end(self):
return "20191231"
class HYSETS(Camels):
"""
database for hydrometeorological modeling of 14,425 North American watersheds
from 1950-2018 following the work of
[Arsenault et al., 2020](https://doi.org/10.1038/s41597-020-00583-2)
The user must manually download the files, unpack them and provide
the `path` where these files are saved.
    This data comes with multiple sources. Each source has one or more dynamic_features.
    The following data_source values are available.
    |sources | dynamic_features |
    |---------------|------------------|
    |SNODAS_SWE | discharge, swe|
|SCDNA | discharge, pr, tasmin, tasmax|
|nonQC_stations | discharge, pr, tasmin, tasmax|
|Livneh | discharge, pr, tasmin, tasmax|
|ERA5 | discharge, pr, tasmax, tasmin|
|ERAS5Land_SWE | discharge, swe|
|ERA5Land | discharge, pr, tasmax, tasmin|
    All sources contain one or more of the following dynamic_features,
    with the following shapes:
|dynamic_features | shape |
|----------------------------|------------|
|time | (25202,) |
|watershedID | (14425,) |
|drainage_area | (14425,) |
|drainage_area_GSIM | (14425,) |
|flag_GSIM_boundaries | (14425,) |
|flag_artificial_boundaries | (14425,) |
|centroid_lat | (14425,) |
|centroid_lon | (14425,) |
|elevation | (14425,) |
|slope | (14425,) |
|discharge | (14425, 25202) |
|pr | (14425, 25202) |
|tasmax | (14425, 25202) |
|tasmin | (14425, 25202) |
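    Example (illustrative; `path` must point to the manually downloaded files,
    and the source arguments default to the values shown):
        >>> ds = HYSETS(path='path/to/HYSETS',
        ...             swe_source='SNODAS_SWE',
        ...             discharge_source='ERA5')                      # doctest: +SKIP
        >>> dyn = ds.fetch(stations=1, as_dataframe=True)             # doctest: +SKIP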
"""
doi = "https://doi.org/10.1038/s41597-020-00583-2"
url = "https://osf.io/rpc3w/"
Q_SRC = ['ERA5', 'ERA5Land', 'ERA5Land_SWE', 'Livneh', 'nonQC_stations', 'SCDNA', 'SNODAS_SWE']
SWE_SRC = ['ERA5Land_SWE', 'SNODAS_SWE']
OTHER_SRC = [src for src in Q_SRC if src not in ['ERA5Land_SWE', 'SNODAS_SWE']]
dynamic_features = ['discharge', 'swe', 'tasmin', 'tasmax', 'pr']
def __init__(self,
path:str,
swe_source:str = "SNODAS_SWE",
discharge_source: str = "ERA5",
tasmin_source: str = "ERA5",
tasmax_source: str = "ERA5",
pr_source: str = "ERA5",
**kwargs
):
"""
Arguments:
path : path where all the data files are saved.
swe_source : source of swe data.
discharge_source : source of discharge data
tasmin_source : source of tasmin data
tasmax_source : source of tasmax data
pr_source : source of pr data
kwargs : arguments for `Camels` base class
"""
assert swe_source in self.SWE_SRC, f'source must be one of {self.SWE_SRC}'
assert discharge_source in self.Q_SRC, f'source must be one of {self.Q_SRC}'
assert tasmin_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert tasmax_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert pr_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
self.sources = {
'swe': swe_source,
'discharge': discharge_source,
'tasmin': tasmin_source,
'tasmax': tasmax_source,
'pr': pr_source
}
super().__init__(**kwargs)
self.ds_dir = path
fpath = os.path.join(self.ds_dir, 'hysets_dyn.nc')
if not os.path.exists(fpath):
self._maybe_to_netcdf('hysets_dyn')
def _maybe_to_netcdf(self, fname:str):
# todo saving as one file takes very long time
oneD_vars = []
twoD_vars = []
for src in self.Q_SRC:
xds = xr.open_dataset(os.path.join(self.ds_dir, f'HYSETS_2020_{src}.nc'))
for var in xds.variables:
print(f'getting {var} from source {src} ')
if len(xds[var].data.shape) > 1:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
twoD_vars.append(xar)
else:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
oneD_vars.append(xar)
oneD_xds = xr.merge(oneD_vars)
twoD_xds = xr.merge(twoD_vars)
oneD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_static.nc"))
twoD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_dyn.nc"))
return
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('HYSETS', x)
self._ds_dir = x
@property
def static_features(self):
df = self.read_static_data()
return df.columns.to_list()
def stations(self) -> list:
return self.read_static_data().index.to_list()
@property
def start(self):
return "19500101"
@property
def end(self):
return "20181231"
def fetch_stations_attributes(self,
stations: list,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
st = None,
en = None,
as_dataframe: bool = False,
**kwargs):
stations = check_attributes(stations, self.stations())
stations = [int(stn) for stn in stations]
if dynamic_features is not None:
dyn = self._fetch_dynamic_features(stations=stations,
dynamic_features=dynamic_features,
as_dataframe=as_dataframe,
**kwargs
)
if static_features is not None: # we want both static and dynamic
to_return = {}
static = self._fetch_static_features(station=stations,
static_features=static_features,
**kwargs
)
to_return['static'] = static
to_return['dynamic'] = dyn
else:
to_return = dyn
elif static_features is not None:
# we want only static
to_return = self._fetch_static_features(
station=stations,
static_features=static_features,
**kwargs
)
else:
raise ValueError
return to_return
def fetch_dynamic_features(self,
station,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches dynamic attributes of one station."""
station = [int(station)]
return self._fetch_dynamic_features(stations=station,
dynamic_features=dynamic_features,
st=st,
en=en,
as_dataframe=as_dataframe)
def _fetch_dynamic_features(self,
stations:list,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False,
as_ts=False
):
"""Fetches dynamic attributes of station."""
st, en = self._check_length(st, en)
attrs = check_attributes(dynamic_features, self.dynamic_features)
stations = np.subtract(stations, 1).tolist()
# maybe we don't need to read all variables
sources = {k:v for k,v in self.sources.items() if k in attrs}
# original .nc file contains datasets with dynamic and static features as data_vars
# however, for uniformity of this API and easy usage, we want a Dataset to have
# station names/gauge_ids as data_vars and each data_var has
# dimension (time, dynamic_variables)
# Therefore, first read all data for each station from .nc file
# then rearrange it.
# todo, this operation is slower because of `to_dataframe`
# also doing this removes all the metadata
x = {}
f = os.path.join(self.ds_dir, "hysets_dyn.nc")
xds = xr.open_dataset(f)
for stn in stations:
xds1 = xds[[f'{k}_{v}' for k, v in sources.items()]].sel(watershed=stn, time=slice(st, en))
xds1 = xds1.rename_vars({f'{k}_{v}': k for k, v in sources.items()})
x[stn] = xds1.to_dataframe(['time'])
xds = xr.Dataset(x)
xds = xds.rename_dims({'dim_1': 'dynamic_features'})
xds = xds.rename_vars({'dim_1': 'dynamic_features'})
if as_dataframe:
return xds.to_dataframe(['time', 'dynamic_features'])
return xds
def _fetch_static_features(self,
station,
static_features:Union[str, list]='all',
st=None,
en=None,
as_ts=False):
df = self.read_static_data()
static_features = check_attributes(static_features, self.static_features)
if isinstance(station, str):
station = [station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
return self.to_ts(df.loc[station][static_features], st=st, en=en, as_ts=as_ts)
def fetch_static_features(self,
station,
features='all',
st=None,
en=None,
as_ts=False
)->pd.DataFrame:
return self._fetch_static_features(station, features, st, en, as_ts)
def read_static_data(self):
fname = os.path.join(self.ds_dir, 'HYSETS_watershed_properties.txt')
static_df = pd.read_csv(fname, index_col='Watershed_ID', sep=';')
static_df.index = static_df.index.astype(str)
return static_df
class CAMELS_US(Camels):
"""
Downloads and processes CAMELS dataset of 671 catchments named as CAMELS
from https://ral.ucar.edu/solutions/products/camels
https://doi.org/10.5194/hess-19-209-2015
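    Example (illustrative; the dataset is downloaded on first use):
        >>> ds = CAMELS_US(data_source='basin_mean_daymet')           # doctest: +SKIP
        >>> dyn = ds.fetch(stations=10, as_dataframe=True)            # doctest: +SKIP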
"""
DATASETS = ['CAMELS_US']
url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip"
catchment_attr_url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip"
folders = {'basin_mean_daymet': f'basin_mean_forcing{SEP}daymet',
'basin_mean_maurer': f'basin_mean_forcing{SEP}maurer',
'basin_mean_nldas': f'basin_mean_forcing{SEP}nldas',
'basin_mean_v1p15_daymet': f'basin_mean_forcing{SEP}v1p15{SEP}daymet',
'basin_mean_v1p15_nldas': f'basin_mean_forcing{SEP}v1p15{SEP}nldas',
'elev_bands': f'elev{SEP}daymet',
'hru': f'hru_forcing{SEP}daymet'}
dynamic_features = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)', 'Flow']
def __init__(self, data_source='basin_mean_daymet'):
        assert data_source in self.folders, f'allowed data sources are {self.folders.keys()}'
self.data_source = data_source
super().__init__("CAMELS_US")
if os.path.exists(self.ds_dir):
print(f"dataset is already downloaded at {self.ds_dir}")
else:
download(self.url, os.path.join(self.camels_dir, f'CAMELS_US{SEP}CAMELS_US.zip'))
download(self.catchment_attr_url, os.path.join(self.camels_dir, f"CAMELS_US{SEP}catchment_attrs.zip"))
self._unzip()
self.attr_dir = os.path.join(self.ds_dir, f'catchment_attrs{SEP}camels_attributes_v2.0')
self.dataset_dir = os.path.join(self.ds_dir, f'CAMELS_US{SEP}basin_dataset_public_v1p2')
self._maybe_to_netcdf('camels_us_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def start(self):
return "19800101"
@property
def end(self):
return "20141231"
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=';', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
def stations(self) -> list:
stns = []
for _dir in os.listdir(os.path.join(self.dataset_dir, 'usgs_streamflow')):
cat = os.path.join(self.dataset_dir, f'usgs_streamflow{SEP}{_dir}')
stns += [fname.split('_')[0] for fname in os.listdir(cat)]
# remove stations for which static values are not available
for stn in ['06775500', '06846500', '09535100']:
stns.remove(stn)
return stns
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
dyn = {}
for station in stations:
# attributes = check_attributes(dynamic_features, self.dynamic_features)
assert isinstance(station, str)
df = None
df1 = None
dir_name = self.folders[self.data_source]
for cat in os.listdir(os.path.join(self.dataset_dir, dir_name)):
cat_dirs = os.listdir(os.path.join(self.dataset_dir, f'{dir_name}{SEP}{cat}'))
stn_file = f'{station}_lump_cida_forcing_leap.txt'
if stn_file in cat_dirs:
df = pd.read_csv(os.path.join(self.dataset_dir,
f'{dir_name}{SEP}{cat}{SEP}{stn_file}'),
sep="\s+|;|:",
skiprows=4,
engine='python',
names=['Year', 'Mnth', 'Day', 'Hr', 'dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)'],
)
df.index = pd.to_datetime(df['Year'].map(str) + '-' + df['Mnth'].map(str) + '-' + df['Day'].map(str))
flow_dir = os.path.join(self.dataset_dir, 'usgs_streamflow')
for cat in os.listdir(flow_dir):
cat_dirs = os.listdir(os.path.join(flow_dir, cat))
stn_file = f'{station}_streamflow_qc.txt'
if stn_file in cat_dirs:
fpath = os.path.join(flow_dir, f'{cat}{SEP}{stn_file}')
                    df1 = pd.read_csv(fpath, sep=r"\s+|;|:'",
names=['station', 'Year', 'Month', 'Day', 'Flow', 'Flag'],
engine='python')
df1.index = pd.to_datetime(
df1['Year'].map(str) + '-' + df1['Month'].map(str) + '-' + df1['Day'].map(str))
out_df = pd.concat([df[['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)', 'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)']],
df1['Flow']],
axis=1)
dyn[station] = out_df
return dyn
def fetch_static_features(self, station, features):
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
static_df = pd.DataFrame()
for f in files:
# index should be read as string
idx = pd.read_csv(f, sep=';', usecols=['gauge_id'], dtype=str)
_df = pd.read_csv(f, sep=';', index_col='gauge_id')
_df.index = idx['gauge_id']
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
        else:  # index should be read as string because it has 0s at the start
idx = pd.read_csv(static_fpath, usecols=['gauge_id'], dtype=str)
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = idx['gauge_id']
static_df.index = static_df.index.astype(str)
df = static_df.loc[station][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
class CAMELS_BR(Camels):
"""
Downloads and processes CAMELS dataset of Brazil
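    Example (illustrative; the dataset is downloaded on first use):
        >>> ds = CAMELS_BR()                                          # doctest: +SKIP
        >>> dyn = ds.fetch(stations=1, as_dataframe=True)             # doctest: +SKIP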
"""
url = "https://zenodo.org/record/3964745#.YA6rUxZS-Uk"
folders = {'streamflow_m3s': '02_CAMELS_BR_streamflow_m3s',
'streamflow_mm': '03_CAMELS_BR_streamflow_mm_selected_catchments',
'simulated_streamflow_m3s': '04_CAMELS_BR_streamflow_simulated',
'precipitation_cpc': '07_CAMELS_BR_precipitation_cpc',
'precipitation_mswep': '06_CAMELS_BR_precipitation_mswep',
'precipitation_chirps': '05_CAMELS_BR_precipitation_chirps',
'evapotransp_gleam': '08_CAMELS_BR_evapotransp_gleam',
'evapotransp_mgb': '09_CAMELS_BR_evapotransp_mgb',
'potential_evapotransp_gleam': '10_CAMELS_BR_potential_evapotransp_gleam',
'temperature_min': '11_CAMELS_BR_temperature_min_cpc',
'temperature_mean': '12_CAMELS_BR_temperature_mean_cpc',
'temperature_max': '13_CAMELS_BR_temperature_max_cpc'
}
def __init__(self):
super().__init__("CAMELS-BR")
self._download()
self._maybe_to_netcdf('camels_dyn_br')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def static_dir(self):
path = None
for _dir in self._all_dirs:
if "attributes" in _dir:
                # supposing that 'attributes' exists in only one file/folder in self.ds_dir
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
return path
@property
def static_files(self):
all_files = None
if self.static_dir is not None:
all_files = glob.glob(f"{self.static_dir}/*.txt")
return all_files
@property
def dynamic_features(self) -> list:
return list(CAMELS_BR.folders.keys())
@property
def static_attribute_categories(self):
static_attrs = []
for f in self.static_files:
ff = str(os.path.basename(f).split('.txt')[0])
static_attrs.append('_'.join(ff.split('_')[2:]))
return static_attrs
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes','01_CAMELS_BR_attributes')}/*.txt")
cols = []
for f in files:
_df = | pd.read_csv(f, sep=' ', index_col='gauge_id', nrows=1) | pandas.read_csv |
import pandas as pd
#import numpy as np
from sklearn.ensemble import RandomForestClassifier #, RandomForestRegressor
from sklearn.metrics import roc_auc_score #, mean_squared_error
# 2018.11.28 Created by Eamon.Zhang
def feature_shuffle_rf(X_train,y_train,max_depth=None,class_weight=None,top_n=15,n_estimators=50,random_state=0):
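    """
    Permutation (shuffle) feature importance with a random forest.
    A RandomForestClassifier is fit on X_train/y_train; each feature is then
    shuffled in turn and the resulting drop in train ROC-AUC is recorded in
    feature_dict (a larger drop suggests a more important feature).
    """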
model = RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,
random_state=random_state,class_weight=class_weight,
n_jobs=-1)
model.fit(X_train, y_train)
train_auc = roc_auc_score(y_train, (model.predict_proba(X_train))[:, 1])
feature_dict = {}
# selection logic
for feature in X_train.columns:
X_train_c = X_train.copy().reset_index(drop=True)
y_train_c = y_train.copy().reset_index(drop=True)
# shuffle individual feature
X_train_c[feature] = X_train_c[feature].sample(frac=1,random_state=random_state).reset_index(
drop=True)
#print(X_train_c.isnull().sum())
# make prediction with shuffled feature and calculate roc-auc
shuff_auc = roc_auc_score(y_train_c,
(model.predict_proba(X_train_c))[:, 1])
#print(shuff_auc)
# save the drop in roc-auc
feature_dict[feature] = (train_auc - shuff_auc)
#print(feature_dict)
auc_drop = | pd.Series(feature_dict) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Use this URL for a Google Colab Demo of this class and its usage:
https://colab.research.google.com/drive/154_2tvDn_36pZzU_XkSv9Xvd3KjQCw1U
"""
from datetime import timedelta, datetime, timezone
import sys, os, time, random
import pandas as pd
import json
import csv
import sqlite3
from sqlite3 import Error
import ccxt
import yfinance as yf
class Data():
"""
Class Wraps CCXT and Yahoo Finance Data Fetching Functions
"""
timedeltas_timeframe_suffixes = {
"s": timedelta(seconds=1),
"m": timedelta(minutes=1),
"h": timedelta(hours=1),
"d": timedelta(days=1),
"w": timedelta(days=7),
"M": timedelta(days=31),
"Y": timedelta(weeks=52), # option for fetch trades
"y": timedelta(weeks=52) # lowercase alias
}
class CCXT():
"""
The majority of code credit goes to:
https://github.com/Celeborn2BeAlive/cryptobigbro
exchange_id: Any exchange id available thru CCXT
https://github.com/ccxt/ccxt/wiki/Manual#exchanges
symbol: A slash is used for all symbols except on BitMEX Futures.
eg. XRPH20 has no slash,
but BTC/USD and ETH/USD are how they identify the USD pairs.
timeframe: Any timeframe available on the chosen exchange.
candle_amount: Use 'all' to get FULL candle history.
Default is 500.
trades_amount: Use 'all' to get FULL trade history.
Default is '10m' aka 10 mins.
save_path: Use if you want to save data as .csv file or SQLite DB.
save_format: 'csv' or 'sqlite' are the database options.
TT_Format: True would set columns with a prefix using the base symbol
eg. BTC:open, BTC:close, BTC:volume
Example Usage:
from tensortrade.utils.ccxt_data_fetcher import CCXT_Data
# Fetch Trades
trades = CCXT_Data.fetch_trades(
exchange = 'bitmex',
symbol = 'BTC/USD',
trades_amount = '10m', ## Get 10 minutes worth of trades
save_path = '/content/drive/My Drive/',
save_format = 'csv'
)
# Fetch Candles
ohlcv = CCXT_Data.fetch_candles(
exchange = 'binance',
symbol = 'BTC/USDT',
timeframe = '1d',
candle_amount = '1000', ## Get 1000 1 Day candles
save_path = '/content/drive/My Drive/Crypto_SQLite_DBs/',
save_format = 'sqlite'
)
"""
@classmethod
def fetch_candles(cls,
exchange: str = 'binance',
symbol: str = 'BTC/USDT',
timeframe: str = '1d',
candle_amount: int = 1000,
save_path = '',
save_format: str = 'csv',
limit: int = 1000,
TT_Format=False):
"""
Fetch OHLCV aka Candle Data using CCXT
Able to fetch full available candle history
Options to save to CSV or SQLite DB files
"""
mk_path = ''
path = save_path
path_to_db_file = ''
csv = False
sqlite = False
if path:
if save_format.lower() == 'csv':
csv = True
if save_format.lower() == 'sqlite':
sqlite = True
exchange_id = exchange.lower()
symbol = symbol.upper()
# Init CCXT exchange object
ccxt_exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True
})
ccxt_exchange.load_markets() # Requisite CCXT step
all_symbols = [symbol for symbol in ccxt_exchange.symbols] # Get all valid symbols on exchange
all_timeframes = [tf for tf in ccxt_exchange.timeframes] # Get all valid timeframes
timeframe = timeframe if timeframe in all_timeframes else None
symbol = symbol if symbol in all_symbols else None
# Skip to next symbol if not found on exchange
if not symbol:
print("[ERROR] Unsupported symbol {} for exchange {}.".format(symbol, exchange_id))
return None
if not timeframe: # Skip if TF not available on symbol
print("[ERROR] Unsupported timeframe {} for {}.".format(timeframe, exchange_id))
return None
print("-- Fetching {} candles for {}".format(timeframe, symbol))
# Grab most recent timestamp if data exists already
if type(candle_amount) != str:
if candle_amount > 0:
if timeframe.endswith('M'):
_ = timeframe.split('M')
c = int(_[0])
                        # Special case for month because it has no fixed timedelta
since = datetime.utcnow() - (c * candle_amount * Data._timedelta(timeframe))
since = datetime(since.year, since.month, 1, tzinfo=timezone.utc)
else:
since = datetime.utcnow() - (candle_amount * Data._timedelta(timeframe))
elif candle_amount.lower() == 'all':
since = datetime(1970, 1, 1, tzinfo=timezone.utc)
else:
if timeframe.endswith('M'):
since = datetime(1970, 1, 1, tzinfo=timezone.utc)
else:
since = datetime.utcnow() - (500 * Data._timedelta(timeframe))
since = Data._earliest_datetime(since) # sanitize if date is earlier than 1970
main_path = ccxt_exchange.id + '/' + symbol.replace('/','_') + '_' + timeframe
if csv:
path_to_db_file = path + 'csv/' + main_path + '.csv'
mk_path = path + 'csv/'
path = path + 'csv/' + ccxt_exchange.id + '/'
elif sqlite:
path_to_db_file = path + 'sqlite/' + main_path + '.sqlite'
mk_path = path + 'sqlite/'
path = path + 'sqlite/' + ccxt_exchange.id + '/'
df = pd.DataFrame()
df_db = pd.DataFrame() # If DB File exists, load it to grab most recent candle timestamp
# Fetch candles till done
while True:
                # Could be more efficient by saving the timestamp once and loading it here instead
if path and os.path.exists(path_to_db_file):
#print("\t\t-- Loading existing history from file {} to get next timestamp.".format(path_to_db_file))
if csv:
df_db = pd.read_csv(path_to_db_file)
if sqlite:
conn = Data.Save_Data._load_sqlite_db(path_to_db_file)
if conn:
df_db = Data._sqlite_to_dataframe(conn, table='ohlcv')
if not df_db.empty:
since = datetime.fromtimestamp(df_db.timestamp.values[-1], timezone.utc) # said close tiemstamp before, not open
# Check if candle DB is up to date
next_open_date = Data._compute_end_timestamp(since, timeframe) + Data._timedelta('1s')
if datetime.now(tz=timezone.utc) < next_open_date:
print("\t-- The next candle time is {}, no request needed.".format(since + Data._timedelta(timeframe)))
continue
# Fetch candle data with CCXT
print("\t-- Fetching candles from {}".format(since.strftime('%m/%d/%Y, %H:%M:%S')))
retries = 3
while retries > 0:
try:
df = ccxt_exchange.fetch_ohlcv(symbol=symbol,
timeframe=timeframe,
since=int(since.timestamp()*1000),
limit=limit)
df = pd.DataFrame(data=df, columns=['timestamp','open','high','low','close','volume'])
df['timestamp'] = df['timestamp'].apply(lambda t: int(t/1000)) # convert timestamp from nanoseconds to milliseconds (expected by datetime)
break
except Exception as error:
if retries == 3:
print('Retry 1/3 | Got an error', type(error).__name__, error.args, ', retrying in 3 seconds.')
time.sleep(3)
elif retries == 2:
print('Retry 2/3 | Got an error', type(error).__name__, error.args, ', retrying in 10 seconds...')
time.sleep(10)
else:
print('Final Retry: Got an error', type(error).__name__, error.args, ', retrying in 25 seconds...')
time.sleep(25)
retries -= 1
#else:
print("\t\t-- {} candles received.".format(len(df)))
# Writing data to DB format of choice
if not df_db.empty:
df = df_db.append(df)
df.drop_duplicates('timestamp', inplace=True)
# Save Data to DB file
if csv:
Data.Save_Data.as_csv(df, path_to_db_file)
elif sqlite:
sql_query = 'create table if not exists ohlcv (timestamp, open, high, low, close, volume)'
sql_table_name = 'ohlcv'
Data.Save_Data.as_sqlite(df, path_to_db_file, sql_table_name, sql_query)
new_since_date = df.timestamp.values[-1] # Most recent timestamp in db
if df.empty or since.timestamp() == new_since_date:
print("\t\t-- No new candles received for timeframe: {}, work is done.".format(timeframe))
break
df_db = df.copy()
df = pd.DataFrame()
# Update last candle date
since = datetime.fromtimestamp(df_db.timestamp.values[-1], timezone.utc)
#time.sleep(ccxt_exchange.rateLimit * 5 / 1000) # soften IO Load
# After getting all the candles, format and return for tensortrade use
# Format OHCLV Data for the TensorTrade DataFeed
df_db.sort_values(by='timestamp', ascending=True, inplace=True)
if TT_Format:
df_db = df_db.rename({"timestamp": "Date"}, axis='columns')
df_db['Date'] = df_db['Date'].apply(lambda x: datetime.utcfromtimestamp(x))
df_db['Date'] = df_db['Date'].dt.strftime('%Y-%m-%d %H:%M %p')
df_db = df_db.set_index("Date")
# Format column names for tensortrade use
if ccxt_exchange.id != 'bitmex' and '/' in symbol:
base, quote = symbol.split('/')
else:
base = symbol[:3]
df_db.columns = [base + ":" + name.lower() for name in df_db.columns]
else:
df_db = df_db.rename({"timestamp": "Date"}, axis='columns')
df_db['Date'] = df_db['Date'].apply(lambda x: datetime.utcfromtimestamp(x))
df_db = df_db.set_index("Date")
print('\t\t\t-- Total Candles: ' + str(len(df_db)) + '\n')
return df_db
@classmethod
def fetch_trades(cls,
exchange: str = 'bitmex',
symbol: str = 'BTC/USD',
trades_amount: str = '10m',
save_path: str = '',
save_format: str = 'csv',
limit: int=1000,
TT_Format=False):
"""
Fetch Trades aka Tick Data using CCXT
Able to fetch full available trade history
Options to save to CSV or SQLite DB files
resample_ticks() converts trades to any candle timeframe
"""
mk_path = ''
path = save_path
path_to_db_file = ''
since = None
csv = False
sqlite = False
if path:
if save_format.lower() == 'csv':
csv = True
if save_format.lower() == 'sqlite':
sqlite = True
exchange_id = exchange.lower()
symbol = symbol.upper()
# Init CCXT exchange object
ccxt_exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True
})
ccxt_exchange.load_markets() # Requisite CCXT step
all_symbols = [symbol for symbol in ccxt_exchange.symbols] # Get all valid symbols on exchange
symbol = symbol if symbol in all_symbols else None
# Skip to next symbol if not found on exchange
if not symbol:
print("[ERROR] Unsupported symbol {} for exchange {}.".format(symbol, exchange_id))
return None
print("-- Fetching Trades for {}".format(symbol))
main_path = ccxt_exchange.id + '/' + symbol.replace('/','_') + '_Tick_Data'
if csv:
path_to_db_file = path + 'csv/' + main_path + '.csv'
mk_path = path + 'csv/'
path = path + 'csv/' + ccxt_exchange.id + '/'
elif sqlite:
path_to_db_file = path + 'sqlite/' + main_path + '.sqlite'
mk_path = path + 'sqlite/'
path = path + 'sqlite/' + ccxt_exchange.id + '/'
# Load previous DB if exists
            # Could be more efficient by saving the timestamp once and loading it here instead
if path and os.path.exists(path_to_db_file):
if csv:
df_db = pd.read_csv(path_to_db_file)
if sqlite:
                    conn = Data.Save_Data._load_sqlite_db(path_to_db_file)
if conn:
df_db = Data._sqlite_to_dataframe(conn, table='trades')
else:
df_db = pd.DataFrame() # If DB File exists, load it to grab most recent candle timestamp
prev_df_len = len(df_db) # Useful to stop endless loop later
# Grab most recent timestamp if data exists already
# Else set a default start date
if trades_amount != 'all':
since = datetime.utcnow() - Data._timedelta(trades_amount)
elif trades_amount.lower() == 'all': #or cls.since
# check if since can be converted to datetime
# try to conver it with default format, and failing that
# except: shove it into datetime
since = datetime(1970, 1, 1, tzinfo=timezone.utc) # Earliest possible
endless_loop_protection = 0
# Fetch trades till done
while True:
            df = pd.DataFrame()
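# --- Editor's addition: hedged usage sketch, not part of the original module ---
# The excerpt above is truncated inside fetch_trades(). Assuming the enclosing class is named
# `Data` (suggested by the internal Data._timedelta / Data._sqlite_to_dataframe calls), a
# typical call would look roughly like:
#
#     ticks = Data.fetch_trades(exchange='bitmex', symbol='BTC/USD',
#                               trades_amount='10m', save_path='./data/',
#                               save_format='csv', TT_Format=True)
#
# The rest of the trade-fetching loop is not shown in this excerpt, so treat the call above as
# an illustration only.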
# -*- coding: utf-8 -*-
"""
Grading
"""
#These are the packages you need to install, this will try to install them, otherwise use pip to install
try:
import requests
except:
import pip
pip.main(['install', 'requests'])
import requests
try:
import pandas as pd
except:
import pip
pip.main(['install', 'pandas'])
import pandas as pd
try:
import json
except:
import pip
pip.main(['install', 'json'])
import json
#Ensure Canvas API token is in the designated file Canvas API Token.txt
print ('Before you begin the process, please ensure you have copy & pasted your Canvas API token into the file Canvas API Token.txt.')
confirmation = input ('Input any key to continue:')
with open('Canvas API Token.txt','r') as f:
for line in f:
for word in line.split():
token = word
#Course url
url = "https://ubc.instructure.com/"
#Course number
course = input('Input course ID and hit ENTER:\n')
#Input assignment ID number (located in URL)
assignment_id = input('Input assignment ID number and hit ENTER:\n')
print ('Processing data, please wait......\n')
try:
#Obtaining the assignment information (settings, assignment id, rubric id)
assignmentInfo = requests.get(url + '/api/v1/courses/' + str(course) + '/assignments/' + str(assignment_id),
headers= {'Authorization': 'Bearer ' + token})
#Extracting assignment rubric id/rubric for the assignment
assignmentInfo = json.loads(assignmentInfo.text)
rubric_id = str(assignmentInfo['rubric_settings']['id'])
payload = {'include': 'peer_assessments',
'style' : 'full'}
r = requests.get(url + '/api/v1/courses/' + str(course) + '/rubrics/' + rubric_id,
params = payload,
headers= {'Authorization': 'Bearer ' + token})
rubric_return = json.loads(r.text)
#Obtaining assessor_id (person who did peer review), score for the peer reviews
    assessments_df = pd.DataFrame(rubric_return['assessments'])
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')],
names=("date", "contract"))
    cols = pd.Index(["CO1"], name="generic")
import pandas as pd
from functools import reduce
from fooltrader.contract.files_contract import *
import re
import json
class agg_future_dayk(object):
funcs={}
def __init__(self):
self.funcs['shfeh']=self.getShfeHisData
self.funcs['shfec']=self.getShfeCurrentYearData
self.funcs['ineh']=self.getIneHisData
self.funcs['inec']=self.getIneCurrentYearData
self.funcs['dceh']=self.getDceHisData
self.funcs['dcec']=self.getDceCurrentYearData
self.funcs['czceh']=self.getCzceHisData
self.funcs['czcec']=self.getCzceCurrentYearData
self.funcs['cffexh']=self.getCffexHisData
self.funcs['cffexc']=self.getCffexCurrentYearData
def getCurrentYearAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getCurrentYearData(x),exchanges))
            finalpd = pd.concat(pds)
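# Editor's note (hedged sketch, not part of the original class): the dispatch table built in
# __init__ suggests usage along the lines of
#
#     agg = agg_future_dayk()
#     df_all = agg.getCurrentYearAllData()   # concatenate current-year day-K data for all exchanges
#
# getCurrentYearData() and the per-exchange getXxxHisData/getXxxCurrentYearData helpers are
# defined later in the original module and are not shown in this excerpt.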
from typing import Sequence
import pandas as pd
import numpy as np
from datetime import date
def replace_values_having_less_count(dataframe: pd.DataFrame, target_cols: Sequence[str], threshold: int = 100, replace_with="OTHER") -> pd.DataFrame:
for c in target_cols:
vc = dataframe[c].value_counts()
replace_dict = {v: f"{replace_with}_{c.strip().upper()}S" for v in list(vc[vc <= threshold].index)}
dataframe[c] = dataframe[c].replace(replace_dict)
return dataframe
def get_days_from_date(df: pd.DataFrame, date_col_names: Sequence[str]) -> pd.DataFrame:
current_date = np.datetime64(date.today())
for c in date_col_names:
new_col_name = f"DAYS_SINCE_{c.replace('_DATE', '')}"
df[new_col_name] = (pd.to_datetime(df[c]).astype(np.datetime64) - current_date).dt.days
df.drop(c, axis=1, inplace=True)
return df
def get_single_valued_columns(df: pd.DataFrame) -> Sequence[str]:
return [item[0] for item in list(zip(df.columns, list(map(lambda x: len(df[x].value_counts()), df.columns)))) if item[1] == 1]
def get_dummies_for_col(data_frame: pd.DataFrame, col_names: Sequence[str]):
for column in col_names:
        data_frame = pd.concat([data_frame, pd.get_dummies(data_frame[column], drop_first=True)], axis=1)
    return data_frame
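# Editor's addition: a hedged, self-contained demo of the helpers above on synthetic data.
# The column names and threshold below are made up purely for illustration.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "CITY": ["A"] * 150 + ["B"] * 3,          # "B" occurs fewer than `threshold` times
        "SIGNUP_DATE": ["2020-01-01"] * 153,
    })
    demo = replace_values_having_less_count(demo, ["CITY"], threshold=100)
    demo = get_days_from_date(demo, ["SIGNUP_DATE"])
    print(demo["CITY"].value_counts())            # rare "B" is replaced with "OTHER_CITYS"
    print(demo.columns.tolist())                  # SIGNUP_DATE becomes DAYS_SINCE_SIGNUP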
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats, integrate
# load msncodes
msncodes = pd.read_csv("data/csv/original/msncodes.csv")
# load state data
az = pd.read_csv("data/csv/state_data/az_data.csv", engine='c', low_memory=True)
ca = pd.read_csv("data/csv/state_data/ca_data.csv", engine='c', low_memory=True)
import pandas as pd
from unittest import TestCase # or `from unittest import ...` if on Python 3.4+
import tests.helpers as th
import numpy as np
import category_encoders as encoders
class TestLeaveOneOutEncoder(TestCase):
def test_leave_one_out(self):
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
        y_t = pd.DataFrame(np_y_t)
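        # Editor's note (hedged): the original test continues beyond this excerpt. The typical
        # sklearn-style category_encoders flow that such tests exercise is roughly:
        #
        #     enc = encoders.LeaveOneOutEncoder()
        #     enc.fit(X, y)
        #     _ = enc.transform(X_t)
        #
        # The concrete assertions used by the original test are not shown here.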
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 13:21:08 2019
@author: tatumhennig
"""
import numpy as np
import pandas as pd
## ROT rotates and flips a quadrant appropriately.
# Parameters:
# Input, integer N, the length of a side of the square.
# N must be a power of 2.
# Input/output, integer X, Y, the coordinates of a point.
# Input, integer RX, RY, ???
def rot( n, x, y, rx, ry ):
if ( ry == 0 ):
# Reflect.
if ( rx == 1 ):
x = n - 1 - x
y = n - 1 - y
# Flip.
t = x
x = y
y = t
return x, y
## XY2D converts a 2D Cartesian coordinate to a 1D Hilbert coordinate.
# Discussion:
# It is assumed that a square has been divided into an NxN array of cells,
# where N is a power of 2.
# Cell (0,0) is in the lower left corner, and (N-1,N-1) in the upper
# right corner.
# Parameters:
# integer M, the index of the Hilbert curve.
# The number of cells is N=2^M.
# 0 < M.
# Input, integer X, Y, the Cartesian coordinates of a cell.
# 0 <= X, Y < N.
# Output, integer D, the Hilbert coordinate of the cell.
# 0 <= D < N * N.
def xy2d(x,y):
m = 10 # index of hilbert curve
n = 1024 # number of boxes (2^m)
xcopy = x
ycopy = y
d = 0
n = 2 ** m
s = ( n // 2 )
while ( 0 < s ):
if ( 0 < ( abs ( xcopy ) & s ) ):
rx = 1
else:
rx = 0
if ( 0 < ( abs ( ycopy ) & s ) ):
ry = 1
else:
ry = 0
d = d + s * s * ( ( 3 * rx ) ^ ry )
xcopy, ycopy = rot(s, xcopy, ycopy, rx, ry )
s = ( s // 2 )
return d
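# Editor's addition (not part of the original script): a tiny usage sketch of xy2d().
# The curve index m = 10 is hard-coded inside xy2d, so the grid is 1024 x 1024 cells and the
# returned Hilbert coordinate d always falls in the range [0, 1024 * 1024).
def _demo_xy2d():
    # Map a handful of (x, y) grid cells to their position d along the Hilbert curve.
    for cell in [(0, 0), (1, 0), (0, 1), (511, 511), (1023, 1023)]:
        print(cell, "->", xy2d(*cell))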
#*****************************************************************************#
# load in phi psi csv
name = 'wt_pH7_300K_water'
data = pd.read_csv(name + '_phipsi.csv')
from typing import Optional, Union, Tuple
import numpy as np
import pandas as pd
import okama.common.helpers.ratios as ratios
from .common.helpers.helpers import Frame, Float, Date, Index
from .common.make_asset_list import ListMaker
class AssetList(ListMaker):
"""
The list of financial assets implementation.
AssetList can include stocks, ETF, mutual funds, commodities, currencies and stock indexes (benchmarks).
Parameters
----------
assets : list, default None
List of assets. Could include tickers or asset like objects (Asset, Portfolio).
If None a single asset list with a default ticker is used.
first_date : str, default None
First date of monthly return time series.
If None the first date is calculated automatically as the oldest available date for the listed assets.
last_date : str, default None
Last date of monthly return time series.
If None the last date is calculated automatically as the newest available date for the listed assets.
ccy : str, default 'USD'
Base currency for the list of assets. All risk metrics and returns are adjusted to the base currency.
inflation : bool, default True
Defines whether to take inflation data into account in the calculations.
Including inflation could limit available data (last_date, first_date)
as the inflation data is usually published with a one-month delay.
With inflation = False some properties like real return are not available.
"""
def __repr__(self):
dic = {
"assets": self.symbols,
"currency": self._currency.ticker,
"first_date": self.first_date.strftime("%Y-%m"),
"last_date": self.last_date.strftime("%Y-%m"),
"period_length": self._pl_txt,
"inflation": self.inflation if hasattr(self, "inflation") else "None",
}
return repr(pd.Series(dic))
@property
def wealth_indexes(self) -> pd.DataFrame:
"""
Calculate wealth index time series for the assets and accumulated inflation.
Wealth index (Cumulative Wealth Index) is a time series that presents the value of each asset over
historical time period. Accumulated inflation time series is added if `inflation=True` in the AssetList.
        Wealth index is obtained from the accumulated return multiplied by the initial investments.
That is: 1000 * (Acc_Return + 1)
Initial investments are taken as 1000 units of the AssetList base currency.
Returns
-------
DataFrame
Time series of wealth index values for each asset and accumulated inflation.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.AssetList(['SPY.US', 'BND.US'])
>>> x.wealth_indexes.plot()
>>> plt.show()
"""
df = self._add_inflation()
return Frame.get_wealth_indexes(df)
@property
def risk_monthly(self) -> pd.Series:
"""
Calculate monthly risk (standard deviation of return) for each asset.
Monthly risk of the asset is a standard deviation of the rate of return time series.
Standard deviation (sigma σ) is normalized by N-1.
        Monthly risk is calculated for the rate of return time series for the sample from 'first_date' to
'last_date'.
Returns
-------
Series
Monthly risk (standard deviation) values for each asset in form of Series.
See Also
--------
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> al = ok.AssetList(['GC.COMM', 'SHV.US'], ccy='USD', last_date='2021-01')
>>> al.risk_monthly
GC.COMM 0.050864
SHV.US 0.001419
dtype: float64
"""
return self.assets_ror.std()
@property
def risk_annual(self) -> pd.Series:
"""
Calculate annualized risks (standard deviation) for each asset.
        Annualized risk is calculated for the rate of return time series for the sample from 'first_date' to
'last_date'.
Returns
-------
Series
Annualized risk (standard deviation) values for each asset in form of Series.
See Also
--------
        risk_monthly : Calculate monthly risk for each asset.
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Notes
-----
        CFA recommendations are used to annualize risk values [1]_.
.. [1] `What’s Wrong with Multiplying by the Square Root of Twelve. <https://www.cfainstitute.org/en/research/cfa-digest/2013/11/whats-wrong-with-multiplying-by-the-square-root-of-twelve-digest-summary>`_ <NAME>, CFA Institute Journal Review, 2013
Examples
--------
>>> al = ok.AssetList(['GC.COMM', 'SHV.US'], ccy='USD', last_date='2021-01')
>>> al.risk_annual
GC.COMM 0.195236
SHV.US 0.004960
dtype: float64
"""
risk = self.assets_ror.std()
mean_return = self.assets_ror.mean()
return Float.annualize_risk(risk, mean_return)
@property
def semideviation_monthly(self) -> pd.Series:
"""
Calculate semi-deviation monthly values for each asset.
Semi-deviation (Downside risk) is the risk of the return being below the expected return.
        Semi-deviation is calculated for the rate of return time series for the sample from 'first_date' to
'last_date'.
Returns
-------
Series
Monthly semideviation values for each asset in form of Series.
See Also
--------
        risk_monthly : Calculate monthly risk for each asset.
risk_annual : Calculate annualized risks.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> al = ok.AssetList(['GC.COMM', 'SHV.US'], ccy='USD', last_date='2021-01')
>>> al.semideviation_monthly
GC.COMM 0.039358
SHV.US 0.000384
dtype: float64
"""
return Frame.get_semideviation(self.assets_ror)
@property
def semideviation_annual(self) -> pd.Series:
"""
Return semideviation annualized values for each asset.
Semi-deviation (Downside risk) is the risk of the return being below the expected return.
        Semi-deviation is calculated for the rate of return time series for the sample from 'first_date' to
'last_date'.
Returns
-------
Series
Annualized semideviation values for each asset in form of Series.
See Also
--------
        risk_monthly : Calculate monthly risk for each asset.
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> al = ok.AssetList(['GC.COMM', 'SHV.US'], ccy='USD', last_date='2021-01')
>>> al.semideviation_annual
GC.COMM 0.115302
SHV.US 0.000560
dtype: float64
"""
return Frame.get_semideviation(self.assets_ror) * 12 ** 0.5
def get_var_historic(self, time_frame: int = 12, level: int = 1) -> pd.Series:
"""
Calculate historic Value at Risk (VaR) for the assets with a given timeframe.
The VaR calculates the potential loss of an investment with a given time frame and confidence level.
Loss is a positive number (expressed in cumulative return).
If VaR is negative there are expected gains at this confidence level.
Parameters
----------
time_frame : int, default 12
Time period size in months
level : int, default 1
Confidence level in percents. Default value is 1%.
Returns
-------
Series
VaR values for each asset in form of Series.
See Also
--------
        risk_monthly : Calculate monthly risk for each asset.
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> x = ok.AssetList(['SPY.US', 'AGG.US'])
>>> x.get_var_historic(time_frame=60, level=1)
SPY.US 0.2101
AGG.US -0.0867
Name: VaR, dtype: float64
"""
df = self.get_rolling_cumulative_return(window=time_frame).loc[:, self.symbols]
return Frame.get_var_historic(df, level)
def get_cvar_historic(self, time_frame: int = 12, level: int = 1) -> pd.Series:
"""
Calculate historic Conditional Value at Risk (CVAR, expected shortfall) for the assets with a given timeframe.
CVaR is the average loss over a specified time period of unlikely scenarios beyond the confidence level.
Loss is a positive number (expressed in cumulative return).
If CVaR is negative there are expected gains at this confidence level.
Parameters
----------
time_frame : int, default 12
Time period size in months
level : int, default 1
            Confidence level in percents to calculate the CVaR. Default value is 1%.
Returns
-------
Series
CVaR values for each asset in form of Series.
See Also
--------
        risk_monthly : Calculate monthly risk for each asset.
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> x = ok.AssetList(['SPY.US', 'AGG.US'])
>>> x.get_cvar_historic(time_frame=60, level=1)
SPY.US 0.2574
AGG.US -0.0766
        dtype: float64
"""
df = self.get_rolling_cumulative_return(window=time_frame).loc[:, self.symbols]
return Frame.get_cvar_historic(df, level)
@property
def drawdowns(self) -> pd.DataFrame:
"""
Calculate drawdowns time series for the assets.
The drawdown is the percent decline from a previous peak in wealth index.
Returns
-------
DataFrame
Time series of drawdowns.
See Also
--------
        risk_monthly : Calculate monthly risk for each asset.
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> al = ok.AssetList(['SPY.US', 'BND.US'], last_date='2021-08')
>>> al.drawdowns.plot()
>>> plt.show()
"""
return Frame.get_drawdowns(self.assets_ror)
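    # Worked illustration (editor's note): if a wealth index moves 100 -> 120 -> 90, the running
    # peak is 120, so the drawdown at the last point is 90 / 120 - 1 = -0.25, i.e. a 25% decline
    # from the previous peak.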
@property
def recovery_periods(self) -> pd.Series:
"""
Calculate the longest recovery periods for the assets.
The recovery period (drawdown duration) is the number of months to reach the value of the last maximum.
Returns
-------
Series
Max recovery period for each asset (in months).
See Also
--------
drawdowns : Calculate drawdowns time series.
Notes
-----
If the last asset maximum value is not recovered NaN is returned.
        The largest recovery period does not necessarily correspond to the max drawdown.
Examples
--------
>>> x = ok.AssetList(['SPY.US', 'AGG.US'])
>>> x.recovery_periods
SPY.US 52
AGG.US 15
dtype: int32
"""
cummax = self.wealth_indexes.cummax()
growth = cummax.pct_change()[1:]
max_recovery_periods = pd.Series(dtype=int)
for name in self.symbols:
namespace = name.split(".", 1)[-1]
if namespace == 'INFL':
continue
s = growth[name]
s1 = s.where(s == 0).notnull().astype(int)
s1_1 = s.where(s == 0).isnull().astype(int).cumsum()
s2 = s1.groupby(s1_1).cumsum()
            # If the longest zero-growth run ends at the last date, the drawdown has not recovered yet
max_period = s2.max() if s2.idxmax().to_timestamp() != self.last_date else np.NAN
ser = pd.Series(max_period, index=[name])
max_recovery_periods = max_recovery_periods.append(ser)
return max_recovery_periods
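    # Worked illustration of the run-length trick above (editor's note): for a cummax growth
    # series s = [0, 0.1, 0, 0, 0.2], s1 flags the flat months [1, 0, 1, 1, 0], s1_1 labels the
    # runs [0, 1, 1, 1, 2], and the grouped cumsum gives s2 = [1, 0, 1, 2, 0]; its maximum (2)
    # is the longest stretch of months without a new wealth-index maximum.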
def get_cagr(self, period: Optional[int] = None, real: bool = False) -> pd.Series:
"""
Calculate assets Compound Annual Growth Rate (CAGR) for a given trailing period.
Compound annual growth rate (CAGR) is the rate of return that would be required for an investment to grow from
its initial to its final value, assuming all incomes were reinvested.
Inflation adjusted annualized returns (real CAGR) are shown with `real=True` option.
Annual inflation value is calculated for the same period if inflation=True in the AssetList.
Parameters
----------
period: int, optional
CAGR trailing period in years. None for the full time CAGR.
real: bool, default False
CAGR is adjusted for inflation (real CAGR) if True.
AssetList should be initiated with Inflation=True for real CAGR.
Returns
-------
Series
CAGR values for each asset and annualized inflation (optional).
See Also
--------
get_rolling_cagr : Calculate rolling CAGR.
Notes
-----
CAGR is not defined for periods less than 1 year (NaN values are returned).
Examples
--------
>>> x = ok.AssetList()
>>> x.get_cagr(period=5)
SPY.US 0.1510
USD.INFL 0.0195
dtype: float64
To get inflation adjusted return (real annualized return) add `real=True` option:
>>> x = ok.AssetList(['EURUSD.FX', 'CNYUSD.FX'], inflation=True)
>>> x.get_cagr(period=5, real=True)
EURUSD.FX 0.000439
CNYUSD.FX -0.017922
dtype: float64
"""
df = self._add_inflation()
dt0 = self.last_date
if period is None:
dt = self.first_date
else:
self._validate_period(period)
dt = Date.subtract_years(dt0, period)
cagr = Frame.get_cagr(df[dt:])
if real:
if not hasattr(self, "inflation"):
raise ValueError(
"Real CAGR is not defined. Set inflation=True in AssetList to calculate it."
)
mean_inflation = Frame.get_cagr(self.inflation_ts[dt:])
cagr = (1.0 + cagr) / (1.0 + mean_inflation) - 1.0
cagr.drop(self.inflation, inplace=True)
return cagr
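    # Worked illustration (editor's note): with a nominal CAGR of 7% and mean inflation of 4%
    # over the same window, the real CAGR computed above is (1 + 0.07) / (1 + 0.04) - 1 ≈ 0.0288,
    # i.e. roughly 2.9% per year rather than the naive 3% difference.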
def get_rolling_cagr(self, window: int = 12, real: bool = False) -> pd.DataFrame:
"""
Calculate rolling CAGR for each asset.
Compound annual growth rate (CAGR) is the rate of return that would be required for an investment to grow from
its initial to its final value, assuming all incomes were reinvested.
Inflation adjusted annualized returns (real CAGR) are shown with `real=True` option.
Parameters
----------
window : int, default 12
Size of the moving window in months. Window size should be at least 12 months for CAGR.
real: bool, default False
CAGR is adjusted for inflation (real CAGR) if True.
AssetList should be initiated with Inflation=True for real CAGR.
Returns
-------
DataFrame
Time series of rolling CAGR and mean inflation (optionally).
See Also
--------
get_rolling_cagr : Calculate rolling CAGR.
get_cagr : Calculate CAGR.
get_rolling_cumulative_return : Calculate rolling cumulative return.
annual_return : Calculate annualized mean return (arithmetic mean).
Notes
-----
CAGR is not defined for periods less than 1 year (NaN values are returned).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.AssetList(['DXET.XETR', 'DBXN.XETR'], ccy='EUR', inflation=True)
>>> x.get_rolling_cagr(window=5*12).plot()
>>> plt.show()
For inflation adjusted rolling CAGR add 'real=True' option:
>>> x.get_rolling_cagr(window=5*12, real=True).plot()
>>> plt.show()
"""
df = self._add_inflation()
if real:
df = self._make_real_return_time_series(df)
return Frame.get_rolling_fn(df, window=window, fn=Frame.get_cagr)
def get_cumulative_return(
self, period: Union[str, int, None] = None, real: bool = False
) -> pd.Series:
"""
Calculate cumulative return over a given trailing period for each asset.
The cumulative return is the total change in the asset price during the investment period.
Inflation adjusted cumulative returns (real cumulative returns) are shown with `real=True` option.
Annual inflation data is calculated for the same period if `inflation=True` in the AssetList.
Parameters
----------
period: str, int or None, default None
            Trailing period in years. Period should be more than 0.
None - full time cumulative return.
'YTD' - (Year To Date) period of time beginning the first day of the calendar year up to the last month.
real: bool, default False
Cumulative return is adjusted for inflation (real cumulative return) if True.
AssetList should be initiated with `Inflation=True` for real cumulative return.
Returns
-------
Series
Cumulative return values for each asset and cumulative inflation (if inflation=True in AssetList).
See Also
--------
get_rolling_cagr : Calculate rolling CAGR.
get_cagr : Calculate CAGR.
get_rolling_cumulative_return : Calculate rolling cumulative return.
annual_return : Calculate annualized mean return (arithmetic mean).
Examples
--------
>>> x = ok.AssetList(['MCFTR.INDX'], ccy='RUB')
>>> x.get_cumulative_return(period='YTD')
MCFTR.INDX 0.1483
RUB.INFL 0.0485
dtype: float64
"""
df = self._add_inflation()
dt0 = self.last_date
if period is None:
dt = self.first_date
elif str(period).lower() == "ytd":
year = dt0.year
dt = str(year)
else:
self._validate_period(period)
dt = Date.subtract_years(dt0, period)
cr = Frame.get_cumulative_return(df[dt:])
if real:
if not hasattr(self, "inflation"):
raise ValueError(
"Real cumulative return is not defined (no inflation information is available)."
"Set inflation=True in AssetList to calculate it."
)
cumulative_inflation = Frame.get_cumulative_return(self.inflation_ts[dt:])
cr = (1.0 + cr) / (1.0 + cumulative_inflation) - 1.0
cr.drop(self.inflation, inplace=True)
return cr
def get_rolling_cumulative_return(
self, window: int = 12, real: bool = False
) -> pd.DataFrame:
"""
Calculate rolling cumulative return for each asset.
The cumulative return is the total change in the asset price.
Parameters
----------
window : int, default 12
Size of the moving window in months.
real: bool, default False
Cumulative return is adjusted for inflation (real cumulative return) if True.
AssetList should be initiated with `Inflation=True` for real cumulative return.
Returns
-------
DataFrame
Time series of rolling cumulative return.
See Also
--------
get_rolling_cagr : Calculate rolling CAGR.
get_cagr : Calculate CAGR.
get_cumulative_return : Calculate cumulative return.
annual_return : Calculate annualized mean return (arithmetic mean).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.AssetList(['DXET.XETR', 'DBXN.XETR'], ccy='EUR', inflation=True)
>>> x.get_rolling_cumulative_return(window=5*12).plot()
>>> plt.show()
For inflation adjusted rolling cumulative return add 'real=True' option:
>>> x.get_rolling_cumulative_return(window=5*12, real=True).plot()
>>> plt.show()
"""
df = self._add_inflation()
if real:
df = self._make_real_return_time_series(df)
return Frame.get_rolling_fn(
df, window=window, fn=Frame.get_cumulative_return, window_below_year=True
)
@property
def annual_return_ts(self) -> pd.DataFrame:
"""
Calculate annual rate of return time series for each asset.
Rate of return is calculated for each calendar year.
Returns
-------
DataFrame
Calendar annual rate of return time series.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> al = ok.AssetList(['SPY.US', 'BND.US'], last_date='2021-08')
>>> al.annual_return_ts.plot(kind='bar')
>>> plt.show()
"""
return Frame.get_annual_return_ts_from_monthly(self.assets_ror)
def describe(
self, years: Tuple[int, ...] = (1, 5, 10), tickers: bool = True
) -> pd.DataFrame:
"""
Generate descriptive statistics for a list of assets.
Statistics includes:
- YTD (Year To date) compound return
- CAGR for a given list of periods
- LTM Dividend yield - last twelve months dividend yield
Risk metrics (full period):
- risk (standard deviation)
- CVAR (timeframe is 1 year)
- max drawdowns (and dates of the drawdowns)
Statistics also shows for each asset:
- inception date - first date available for each asset
- last asset date - available for each asset date
- Common last data date - common for the asset list data (may be set by last_date manually)
Parameters
----------
years : tuple of (int,), default (1, 5, 10)
List of periods for CAGR.
tickers : bool, default True
            Defines whether to show tickers (True) or asset names in the header.
Returns
-------
DataFrame
Table of descriptive statistics for a list of assets.
See Also
--------
get_cumulative_return : Calculate cumulative return.
get_cagr : Calculate assets Compound Annual Growth Rate (CAGR).
dividend_yield : Calculate dividend yield (LTM).
risk_annual : Return annualized risks (standard deviation).
get_cvar : Calculate historic Conditional Value at Risk (CVAR, expected shortfall).
drawdowns : Calculate drawdowns.
Examples
--------
>>> al = ok.AssetList(['SPY.US', 'AGG.US'], last_date='2021-08')
>>> al.describe(years=[1, 10, 15])
property period AGG.US SPY.US inflation
0 Compound return YTD -0.005620 0.180519 0.048154
1 CAGR 1 years -0.007530 0.363021 0.053717
2 CAGR 10 years 0.032918 0.152310 0.019136
3 CAGR 15 years 0.043013 0.107598 0.019788
4 CAGR 17 years, 10 months 0.039793 0.107972 0.022002
5 Dividend yield LTM 0.018690 0.012709 NaN
6 Risk 17 years, 10 months 0.037796 0.158301 NaN
7 CVAR 17 years, 10 months 0.023107 0.399398 NaN
"""
description = pd.DataFrame()
dt0 = self.last_date
df = self._add_inflation()
# YTD return
ytd_return = self.get_cumulative_return(period="YTD")
row = ytd_return.to_dict()
row.update(period="YTD", property="Compound return")
description = description.append(row, ignore_index=True)
# CAGR for a list of periods
if self.pl.years >= 1:
for i in years:
dt = Date.subtract_years(dt0, i)
if dt >= self.first_date:
row = self.get_cagr(period=i).to_dict()
else:
row = {x: None for x in df.columns}
row.update(period=f"{i} years", property="CAGR")
description = description.append(row, ignore_index=True)
# CAGR for full period
row = self.get_cagr(period=None).to_dict()
row.update(period=self._pl_txt, property="CAGR")
description = description.append(row, ignore_index=True)
# Dividend Yield
row = self.assets_dividend_yield.iloc[-1].to_dict()
row.update(period="LTM", property="Dividend yield")
description = description.append(row, ignore_index=True)
# risk for full period
row = self.risk_annual.to_dict()
row.update(period=self._pl_txt, property="Risk")
description = description.append(row, ignore_index=True)
# CVAR
if self.pl.years >= 1:
row = self.get_cvar_historic().to_dict()
row.update(period=self._pl_txt, property="CVAR")
description = description.append(row, ignore_index=True)
# max drawdowns
row = self.drawdowns.min().to_dict()
row.update(period=self._pl_txt, property="Max drawdowns")
description = description.append(row, ignore_index=True)
# max drawdowns dates
row = self.drawdowns.idxmin().to_dict()
row.update(period=self._pl_txt, property="Max drawdowns dates")
description = description.append(row, ignore_index=True)
# inception dates
row = {}
for ti in self.symbols:
# short_ticker = ti.split(".", 1)[0]
value = self.assets_first_dates[ti].strftime("%Y-%m")
row.update({ti: value})
row.update(period=None, property="Inception date")
if hasattr(self, "inflation"):
row.update({self.inflation: self.inflation_first_date.strftime("%Y-%m")})
description = description.append(row, ignore_index=True)
# last asset date
row = {}
for ti in self.symbols:
# short_ticker = ti.split(".", 1)[0]
value = self.assets_last_dates[ti].strftime("%Y-%m")
row.update({ti: value})
row.update(period=None, property="Last asset date")
if hasattr(self, "inflation"):
row.update({self.inflation: self.inflation_last_date.strftime("%Y-%m")})
description = description.append(row, ignore_index=True)
# last data date
row = {x: self.last_date.strftime("%Y-%m") for x in df.columns}
row.update(period=None, property="Common last data date")
description = description.append(row, ignore_index=True)
# rename columns
if hasattr(self, "inflation"):
description.rename(columns={self.inflation: "inflation"}, inplace=True)
description = Frame.change_columns_order(
description, ["inflation"], position="last"
)
description = Frame.change_columns_order(
description, ["property", "period"], position="first"
)
if not tickers:
for ti in self.symbols:
# short_ticker = ti.split(".", 1)[0]
description.rename(columns={ti: self.names[ti]}, inplace=True)
return description
@property
def mean_return(self) -> pd.Series:
"""
Calculate annualized mean return (arithmetic mean) for the rate of return time series (each asset).
Mean return calculated for the full history period. Arithmetic mean for the inflation is also shown
if there is an `inflation=True` option in AssetList.
Returns
-------
Series
Mean return value for each asset.
Examples
--------
>>> x = ok.AssetList(['MCFTR.INDX', 'RGBITR.INDX'], ccy='RUB', inflation=True)
>>> x.mean_return
MCFTR.INDX 0.209090
RGBITR.INDX 0.100133
RUB.INFL 0.081363
dtype: float64
"""
df = self._add_inflation()
mean = df.mean()
return Float.annualize_return(mean)
@property
def real_mean_return(self) -> pd.Series:
"""
        Calculate annualized real mean return (arithmetic mean) for the rate of return time series (each asset).
Real rate of return is adjusted for inflation. Real return is defined if
there is an `inflation=True` option in AssetList.
Returns
-------
Series
Mean real return value for each asset.
Examples
--------
>>> x = ok.AssetList(['MCFTR.INDX', 'RGBITR.INDX'], ccy='RUB', inflation=True)
>>> x.real_mean_return
MCFTR.INDX 0.118116
RGBITR.INDX 0.017357
dtype: float64
"""
# TODO: make a single method with mean_return
if not hasattr(self, "inflation"):
raise ValueError(
"Real Return is not defined. Set inflation=True to calculate."
)
df = pd.concat(
[self.assets_ror, self.inflation_ts], axis=1, join="inner", copy="false"
)
infl_mean = Float.annualize_return(self.inflation_ts.values.mean())
ror_mean = Float.annualize_return(df.loc[:, self.symbols].mean())
return (1.0 + ror_mean) / (1.0 + infl_mean) - 1.0
@property
def dividends_annual(self) -> pd.DataFrame:
"""
Return calendar year dividends sum time series for each asset.
Returns
-------
DataFrame
Annual dividends time series for each asset.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.AssetList(['T.US', 'XOM.US'], first_date='2010-01', last_date='2020-12')
>>> x.dividends_annual.plot(kind='bar')
>>> plt.show()
"""
return self._get_assets_dividends().resample("Y").sum()
@property
def dividend_growing_years(self) -> pd.DataFrame:
"""
Return the number of years when the annual dividend was growing for each asset.
Returns
-------
DataFrame
Dividend growth length periods time series for each asset.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.AssetList(['T.US', 'XOM.US'], first_date='1984-01', last_date='1994-12')
>>> x.dividend_growing_years.plot(kind='bar')
>>> plt.show()
"""
div_growth = self.dividends_annual.pct_change()[1:]
df = pd.DataFrame()
for name in div_growth:
s = div_growth[name]
s1 = s.where(s > 0).notnull().astype(int)
s1_1 = s.where(s > 0).isnull().astype(int).cumsum()
s2 = s1.groupby(s1_1).cumsum()
            df = pd.concat([df, s2], axis=1, copy=False)
return df
@property
def dividend_paying_years(self) -> pd.DataFrame:
"""
Return the number of years of consecutive dividend payments for each asset.
Returns
-------
DataFrame
Dividend payment period length time series for each asset.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.AssetList(['T.US', 'XOM.US'], first_date='1984-01', last_date='1994-12')
>>> x.dividend_paying_years.plot(kind='bar')
>>> plt.show()
"""
div_annual = self.dividends_annual
        df = pd.DataFrame()
for name in div_annual:
s = div_annual[name]
s1 = s.where(s != 0).notnull().astype(int)
s1_1 = s.where(s != 0).isnull().astype(int).cumsum()
s2 = s1.groupby(s1_1).cumsum()
df = | pd.concat([df, s2], axis=1, copy="false") | pandas.concat |
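        # --- Added explanatory note (not in the original source) ---
        # Both dividend properties above rely on the same pandas idiom for counting
        # consecutive "hits": mark hits as 1/0, build a group id that increments on
        # every miss (cumsum over the isnull mask), then take a cumulative sum within
        # each group. For a toy growth series [0.1, 0.2, -0.1, 0.3, 0.4, 0.5] the hit
        # flags are [1, 1, 0, 1, 1, 1], the group ids are [0, 0, 1, 1, 1, 1], and the
        # resulting streak lengths are [1, 2, 0, 1, 2, 3].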
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 17:32:38 2019
@author: Saint8312
"""
import numpy as np
import pandas as pd
import sys, os
import time
import multiprocessing
import itertools
import pickle
'''
math functions
'''
f_euclid_dist = lambda a,b: np.linalg.norm(a-b)
def f_h_step(x, a):
return 1 if (x<=a) else 0
f_y = lambda k : -np.log10(k)
def y_data_processor(path):
'''
    Create a dataframe of -log10(K) binding-affinity values parsed from the index file
'''
mol_units = {'uM':1.e-6, 'pM':1.e-12, 'fM':1.e-15, 'nM':1.e-9, 'mM':1.e-3}
#load the index file
l = []
with open(path, 'r') as f:
for line in f:
if not line.startswith('#'):
l.append((line.rstrip()).split())
df_idx = (pd.DataFrame(l)).rename(columns={0:'id',3:'k'})
#generate the -log_10k values
op_tokens = ['=','~','>','<']
logys = np.zeros(df_idx.shape[0])
for i in range(df_idx.shape[0]):
string = df_idx.loc[i]['k']
for s in string:
if s in op_tokens:
split_str = string.split(s)
break
logys[i] = f_y( float(split_str[-1][:-2]) * mol_units[split_str[-1][-2:]] )
df_idx["log_y"] = logys
return df_idx
def protein_interaction(df_protein_A, df_protein_B, atom_types, cutoff):
'''
calculate the combination of euclidian distance and heaviside step between chains in a protein,
e.g chains=[A,B,C,D], hence the interactions are: [[A-B],[A-C],[A-D],[B-C],[B-D],[C-D]]
'atom_types' are the type of atoms used for calculation
'cutoff' is the distance cutoff between atoms for heaviside step function (in Angstrom)
'''
type_len = len(atom_types)
x_vector = np.zeros(type_len**2)
idx = 0
for a_type in atom_types:
for b_type in atom_types:
#calculate the interaction of each atoms:
sum_interaction = 0
a_atoms = df_protein_A.loc[df_protein_A['atom_type'] == a_type]
b_atoms = df_protein_B.loc[df_protein_B['atom_type'] == b_type]
for i in range(a_atoms.shape[0]):
for j in range(b_atoms.shape[0]):
#get the (x,y,z):
a_atom = a_atoms.iloc[i]
b_atom = b_atoms.iloc[j]
a_coord = np.array([float(a_atom['x_coor']), float(a_atom['y_coor']), float(a_atom['z_coor'])])
b_coord = np.array([float(b_atom['x_coor']), float(b_atom['y_coor']), float(b_atom['z_coor'])])
#calculate the euclidean distance and heaviside step value:
sum_interaction += f_h_step(x=f_euclid_dist(a_coord, b_coord), a=cutoff)
x_vector[idx] = sum_interaction
idx+=1
print(x_vector)
return x_vector
def data_processing(path,id_name, atom_types, cutoff):
#dataframe loader:
path_file = path+'/'+id_name
l =[]
with open(path_file, 'r') as f:
for line in f:
if line.startswith('ATOM'):
clean_line = (line.rstrip()).split()
                # check for alignment mistakes in the data; a row with a spacing alignment error has length 11 after splitting on whitespace
if len(clean_line) == 11:
#split the 2nd last column by the 4th index (this inference is according to PDB file formatting)
split = [clean_line[-2][:4], clean_line[-2][4:]]
clean_line[-2] = split[1]
clean_line.insert(-2, split[0])
                # check whether coordinate columns have collided (most often between the x and y coordinates)
if len(clean_line[6])>=13:
split = [clean_line[6][:-8], clean_line[6][-8:]]
last_elem = clean_line.pop()
clean_line[-1] = last_elem
clean_line.insert(6, split[0])
clean_line[7] = split[1]
if len(clean_line[7])>=13:
split = [clean_line[7][:-8], clean_line[7][-8:]]
last_elem = clean_line.pop()
clean_line[-1] = last_elem
clean_line.insert(7, split[0])
clean_line[8] = split[1]
l.append(clean_line)
elif line.startswith('TER'):
clean_line = (line.rstrip()).split()
l.append(clean_line)
elif line.startswith('ENDMDL'):
break
df_atoms = (pd.DataFrame(l)).rename(columns={0:'record', 6:'x_coor', 7:'y_coor', 8:'z_coor', 11:'atom_type'})
#dataframe splitter:
l_df = []
last_idx = 0
for idx in df_atoms.index[df_atoms['record'] == 'TER'].tolist():
l_df.append(df_atoms.iloc[last_idx:idx])
last_idx = idx+1
#vector calculation:
x_vector = np.zeros(len(atom_types)**2)
length = len(l_df)
for i in range(length):
for j in range(length):
if j>i:
#sum each chain interaction values:
print('protein chain :', i, j)
x_vector += protein_interaction(l_df[i], l_df[j], atom_types, cutoff)
return {'id':id_name, 'x_vector':x_vector}
###########################################
'''
multiprocessing functions
'''
def f_euc_mp(params):
return np.linalg.norm(params[0]-params[1])
def f_heaviside_mp(params):
return 1 if(params[0]<=params[1]) else 0
def protein_interaction_mp(df_protein_A, df_protein_B, atom_types, cutoff, pool):
type_len = len(atom_types)
x_vector = np.zeros(type_len**2)
idx = 0
for a_type in atom_types:
for b_type in atom_types:
            # calculate the interaction for each atom pair:
sum_interaction = 0
a_atoms = df_protein_A.loc[df_protein_A['atom_type'] == a_type].to_dict('records')
b_atoms = df_protein_B.loc[df_protein_B['atom_type'] == b_type].to_dict('records')
a_coords = np.array([[a_atom['x_coor'], a_atom['y_coor'], a_atom['z_coor']] for a_atom in a_atoms], dtype=float)
b_coords = np.array([[b_atom['x_coor'], b_atom['y_coor'], b_atom['z_coor']] for b_atom in b_atoms], dtype=float)
paramlist = list(itertools.product(a_coords, b_coords))
euclid_dists = pool.map(f_euc_mp, paramlist)
euclid_dists = np.array(list(euclid_dists))
paramlist = list(itertools.product(euclid_dists, [cutoff]))
heavisides = pool.map(f_heaviside_mp, paramlist)
heavisides = np.array(list(heavisides))
sum_interaction = np.sum(heavisides)
x_vector[idx] = sum_interaction
idx+=1
print(x_vector)
return x_vector
def data_multi_processing(path,id_name, atom_types, cutoff, pool):
#dataframe loader:
path_file = path+'/'+id_name
l =[]
with open(path_file, 'r') as f:
for line in f:
if line.startswith('ATOM'):
clean_line = (line.rstrip()).split()
                # check for alignment mistakes in the data; a row with a spacing alignment error has length 11 after splitting on whitespace
if len(clean_line) == 11:
#split the 2nd last column by the 4th index (this inference is according to PDB file formatting)
split = [clean_line[-2][:4], clean_line[-2][4:]]
clean_line[-2] = split[1]
clean_line.insert(-2, split[0])
                # check whether coordinate columns have collided (most often between the x and y coordinates)
if len(clean_line[6])>=13:
split = [clean_line[6][:-8], clean_line[6][-8:]]
last_elem = clean_line.pop()
clean_line[-1] = last_elem
clean_line.insert(6, split[0])
clean_line[7] = split[1]
if len(clean_line[7])>=13:
split = [clean_line[7][:-8], clean_line[7][-8:]]
last_elem = clean_line.pop()
clean_line[-1] = last_elem
clean_line.insert(7, split[0])
clean_line[8] = split[1]
l.append(clean_line)
elif line.startswith('TER'):
clean_line = (line.rstrip()).split()
l.append(clean_line)
elif line.startswith('ENDMDL'):
break
df_atoms = ( | pd.DataFrame(l) | pandas.DataFrame |
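    # --- Added explanatory sketch (not part of the original script) ---
    # The nested loops in protein_interaction() count atom pairs whose Euclidean
    # distance lies within `cutoff`. The same count can be obtained with a single
    # NumPy broadcast, which is usually much faster; a minimal sketch, assuming two
    # (n, 3) float coordinate arrays a_coords and b_coords:
    #
    #     diff = a_coords[:, None, :] - b_coords[None, :, :]   # shape (n_a, n_b, 3)
    #     dists = np.linalg.norm(diff, axis=-1)                # all pairwise distances
    #     n_contacts = int((dists <= cutoff).sum())            # pairs within the cutoff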
#!/usr/bin/env python
"""
CEP to coordinates (latitude and longitude).
This script receives list of brazilian postal code and returns the
latitude and longitude coordinates related to this postal code, based
on information provided by Open Street Map (OSM).
"""
import sys # basic system library
import urllib # to open urls
import json # to get lat and lon from open street map
import logging # to record log events
# logging is configured before importing pycep_correios because that package
# sets up its own logging config, which (before Python 3.8) could not be overridden afterwards
logging.basicConfig(filename='exec.log', level=logging.DEBUG,
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%b %d %H:%M:%S')
import pycep_correios # to get venue from correios
import pandas as pd # to handle with data
from tqdm import tqdm # te quiero demasiado <3 (shows progress bar)
def get_name(cep):
"""Get the street name from postal office website."""
try:
address = pycep_correios.get_address_from_cep(cep)
except (ValueError, KeyError, pycep_correios.exceptions.BaseException):
return False
return address
def get_json(query, cep):
"""Get the coordinates based on the street name."""
query_quote = "'" + query + " " + cep + "'" # to search on OSM
query_parse = urllib.parse.quote(
query_quote.encode('utf-8')) # convert non-url characters
url = "https://nominatim.openstreetmap.org/search?q=" \
+ query_parse + "&format=json" # url to get lat lon data
try: # nominatim returns a json, the first data is stored
with urllib.request.urlopen(url) as data:
obj = json.loads(data.read().decode())[0]
except IndexError: # if there is no data
if cep == "": # if already tried twice
            return False, False  # return False, False so the caller knows no lat/lon was found
        else:  # if this is the second attempt, search again without the cep value
return get_json(query, "")
    else:  # if everything's ok, return the lat/lon values
return obj['lat'], obj['lon']
def main(filename):
"""Run the main function of script."""
logging.info(f"Loading file {filename}")
df = | pd.read_excel(filename) | pandas.read_excel |
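    # --- Added usage sketch (not part of the original script) ---
    # One plausible way the two helpers above combine for a single postal code;
    # the 'logradouro' (street) key of the pycep_correios address dict is assumed here:
    #
    #     address = get_name("01001000")
    #     if address:
    #         lat, lon = get_json(address["logradouro"], "01001000")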
# -*- coding: utf-8 -*-
import logging
import traceback
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from sklearn import linear_model
from Eturb import Eturb
from Bturb import Bturb
from turbine_optimizer import objective, contraint, optimizer
# Inlet steam flow thresholds used to judge whether each turbine-generator unit is running
ETURB_M1_MACHINE_STATUS = 10
ETURB_M2_MACHINE_STATUS = 10
BTURB_M1_MACHINE_STATUS = 10
def create_turbine_instance(steam_flow_in_array):
    # create the turbine-generator unit instances
eturb_m1 = Eturb(
instance = "eturb_m1",
steam_flow_in = steam_flow_in_array[0],
steam_flow_in_threshold = ETURB_M1_MACHINE_STATUS
)
eturb_m2 = Eturb(
instance = "eturb_m2",
steam_flow_in = steam_flow_in_array[1],
steam_flow_in_threshold = ETURB_M2_MACHINE_STATUS
)
bturb_m1 = Bturb(
instance = "bturb_m1",
steam_flow_in = steam_flow_in_array[2],
steam_flow_in_threshold = BTURB_M1_MACHINE_STATUS
)
    # determine the on/off status of each turbine
eturb_m1.calculate_machine_statu()
eturb_m2.calculate_machine_statu()
bturb_m1.calculate_machine_statu()
return eturb_m1, eturb_m2, bturb_m1
def turbine_optimizer_main_model(hp_steam_dayprice,
electricity_price_ext,
steamflow_pred_avg,
electricity_power_pred_avg,
lp_steam_throtte,
steam_flow_in_array, steam_flow_side_array, electricity_generation_array,
steam_in_upper_limit_array, steam_in_lower_limit_array, steam_out_upper_limit_array, steam_out_lower_limit_array,
electricity_power_ext_max,
electricity_power_ext):
# ---------------------------------
    # Create the turbine-generator unit instances
# ---------------------------------
eturb_m1, eturb_m2, bturb_m1 = create_turbine_instance(steam_flow_in_array)
# eturb_m1
eturb_m1.steam_flow_side = steam_flow_side_array[0]
eturb_m1.electricity_generation = electricity_generation_array[0]
eturb_m1.machine_steam_in_upper_limit = steam_in_upper_limit_array[0] * eturb_m1.machine_status
eturb_m1.machine_steam_in_lower_limit = steam_in_lower_limit_array[0] * eturb_m1.machine_status
eturb_m1.machine_steam_ext_upper_limit = steam_out_upper_limit_array[0] * eturb_m1.machine_status
eturb_m1.machine_steam_ext_lower_limit = steam_out_lower_limit_array[0] * eturb_m1.machine_status
# eturb_m2
eturb_m2.steam_flow_side = steam_flow_side_array[1]
eturb_m2.electricity_generation = electricity_generation_array[1]
eturb_m2.machine_steam_in_upper_limit = steam_in_upper_limit_array[1] * eturb_m2.machine_status
eturb_m2.machine_steam_in_lower_limit = steam_in_lower_limit_array[1] * eturb_m2.machine_status
eturb_m2.machine_steam_ext_upper_limit = steam_out_upper_limit_array[1] * eturb_m2.machine_status
eturb_m2.machine_steam_ext_lower_limit = steam_out_lower_limit_array[1] * eturb_m2.machine_status
# bturb_m1
bturb_m1.electricity_generation = electricity_generation_array[2]
bturb_m1.machine_steam_in_upper_limit = steam_in_upper_limit_array[2] * bturb_m1.machine_status
bturb_m1.machine_steam_in_lower_limit = steam_in_lower_limit_array[2] * bturb_m1.machine_status
    # Estimate the overall efficiency of each turbine-generator unit
eturb_m1.effect_m_g(eturb_m1.machine_status)
eturb_m2.effect_m_g(eturb_m2.machine_status)
bturb_m1.effect_m_g(bturb_m1.machine_status)
    # Total high-pressure steam inlet flow
hp_steam = eturb_m1.steam_flow_in + eturb_m2.steam_flow_in + bturb_m1.steam_flow_in
# ---------------------------------
    # Build the optimizer arguments
# ---------------------------------
    # objective function arguments
args_obj = (hp_steam_dayprice, electricity_price_ext, hp_steam, electricity_power_ext)
    # constraint arguments
args_con = (steamflow_pred_avg, electricity_power_pred_avg, lp_steam_throtte,
eturb_m1.alpha_1, eturb_m1.alpha_2, eturb_m1.beta,
eturb_m2.alpha_1, eturb_m2.alpha_2, eturb_m2.beta,
bturb_m1.alpha, bturb_m1.beta,
eturb_m1.machine_steam_in_lower_limit, eturb_m1.machine_steam_in_upper_limit,
eturb_m2.machine_steam_in_lower_limit, eturb_m2.machine_steam_in_upper_limit,
bturb_m1.machine_steam_in_lower_limit, bturb_m1.machine_steam_in_upper_limit,
eturb_m1.machine_steam_ext_lower_limit, eturb_m1.machine_steam_ext_upper_limit,
eturb_m2.machine_steam_ext_lower_limit, eturb_m2.machine_steam_ext_upper_limit,
electricity_power_ext_max, hp_steam,
eturb_m1.electricity_generation, eturb_m2.electricity_generation, bturb_m1.electricity_generation,
electricity_power_ext,
eturb_m1.steam_flow_in, eturb_m2.steam_flow_in, bturb_m1.steam_flow_in,
eturb_m1.steam_flow_side, eturb_m2.steam_flow_side)
    # initial values of the decision variables
x0 = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# ---------------------------------
    # Run the optimization
# ---------------------------------
    # call the optimization routine
result = optimizer(args_obj, args_con, x0)
    # optimization results
object_value_min = result.fun
optim_status = result.success
optim_result = result.x
# ---------------------------------
    # Post-process the optimization results
# ---------------------------------
if optim_status:
logging.error("=====================")
logging.error("算法收敛...")
logging.error("=====================")
        # recommended external power purchase
electricity_power_ext_opt = optim_result[1] + electricity_power_ext
        # recommended power generation
eturb_m1_electricity_machine_opt = optim_result[2] + eturb_m1.electricity_generation
eturb_m2_electricity_machine_opt = optim_result[3] + eturb_m2.electricity_generation
bturb_m1_electricity_machine_opt = optim_result[4] + bturb_m1.electricity_generation
        # recommended inlet steam flow
eturb_m1_hp_steam_machine_opt = optim_result[5] + eturb_m1.steam_flow_in
eturb_m2_hp_steam_machine_opt = optim_result[6] + eturb_m2.steam_flow_in
bturb_m1_hp_steam_machine_opt = optim_result[7] + bturb_m1.steam_flow_in
        # recommended extraction steam flow
eturb_m1_lp_steam_machine_opt = optim_result[8] + eturb_m1.steam_flow_side
eturb_m2_lp_steam_machine_opt = optim_result[9] + eturb_m2.steam_flow_side
        # change in inlet steam flow
eturb_m1_steam_flow_in_delta = optim_result[5]
eturb_m2_steam_flow_in_delta = optim_result[6]
bturb_m1_steam_flow_in_delta = optim_result[7]
# ---------------------------------
        # Log output
# ---------------------------------
logging.error("优化状态 = %s", optim_status)
logging.error("外购电电价 = %s", electricity_price_ext)
logging.error("高压蒸汽价格 = %s", hp_steam_dayprice)
logging.error("优化数据 = %s", {
"汽机高压蒸汽产进汽量": optim_result[0] + hp_steam,
"生产车间外购电电功率": electricity_power_ext_opt,
"汽机1自发电发电功率": eturb_m1_electricity_machine_opt,
"汽机2自发电发电功率": eturb_m2_electricity_machine_opt,
"汽机3自发电发电功率": bturb_m1_electricity_machine_opt,
"汽机1进汽量": eturb_m1_hp_steam_machine_opt,
"汽机2进汽量": eturb_m2_hp_steam_machine_opt,
"汽机3进汽量": bturb_m1_hp_steam_machine_opt,
"汽机1抽汽量": eturb_m1_lp_steam_machine_opt,
"汽机2抽汽量": eturb_m2_lp_steam_machine_opt,
})
logging.error("实际数据 = %s", {
"汽机高压蒸汽产进汽量-实际": hp_steam,
"生产车间外购电电功率-实际": electricity_power_ext,
"汽机1自发电发电功率-实际": eturb_m1.electricity_generation,
"汽机2自发电发电功率-实际": eturb_m2.electricity_generation,
"汽机3自发电发电功率-实际": bturb_m1.electricity_generation,
"汽机1进汽量-实际": eturb_m1.steam_flow_in,
"汽机2进汽量-实际": eturb_m2.steam_flow_in,
"汽机3进汽量-实际": bturb_m1.steam_flow_in,
"汽机1抽汽量-实际": eturb_m1.steam_flow_side,
"汽机2抽汽量-实际": eturb_m2.steam_flow_side,
})
        # cost calculation
        logging.error("Minimum objective value from optimization = %s", object_value_min)
        object_value_actual = hp_steam_dayprice * hp_steam + electricity_price_ext * electricity_power_ext * 1000
        logging.error("Objective value at the actual operating point = %s", object_value_actual)
else:
logging.error("=====================")
logging.error("算法不收敛...")
logging.error("=====================")
        # high-pressure steam production
hp_steam_opt = hp_steam
        # recommended external power purchase
electricity_power_ext_opt = electricity_power_ext
        # recommended power generation
eturb_m1_electricity_machine_opt = eturb_m1.electricity_generation
eturb_m2_electricity_machine_opt = eturb_m2.electricity_generation
bturb_m1_electricity_machine_opt = bturb_m1.electricity_generation
        # recommended inlet steam flow
eturb_m1_hp_steam_machine_opt = eturb_m1.steam_flow_in
eturb_m2_hp_steam_machine_opt = eturb_m2.steam_flow_in
bturb_m1_hp_steam_machine_opt = bturb_m1.steam_flow_in
        # recommended extraction steam flow
eturb_m1_lp_steam_machine_opt = eturb_m1.steam_flow_side
eturb_m2_lp_steam_machine_opt = eturb_m2.steam_flow_side
        # change in inlet steam flow
eturb_m1_steam_flow_in_delta = 0
eturb_m2_steam_flow_in_delta = 0
bturb_m1_steam_flow_in_delta = 0
# ---------------------------------
        # Log output
# ---------------------------------
logging.error("优化状态:%s", optim_status)
logging.error("外购电电价:%s", electricity_price_ext)
logging.error("高压蒸汽价格:%s", hp_steam_dayprice)
logging.error("实际数据 = %s", {
"汽机高压蒸汽产进汽量-实际": hp_steam_opt,
"生产车间外购电电功率-实际": electricity_power_ext,
"汽机1自发电发电功率-实际": eturb_m1.electricity_generation,
"汽机2自发电发电功率-实际": eturb_m2.electricity_generation,
"汽机3自发电发电功率-实际": bturb_m1.electricity_generation,
"汽机1进汽量-实际": eturb_m1.steam_flow_in,
"汽机2进汽量-实际": eturb_m2.steam_flow_in,
"汽机3进汽量-实际": bturb_m1.steam_flow_in,
"汽机1抽汽量-实际": eturb_m1.steam_flow_side,
"汽机2抽汽量-实际": eturb_m2.steam_flow_side,
})
        # cost calculation
        object_value_min = hp_steam_dayprice * hp_steam_opt + electricity_price_ext * electricity_power_ext_opt * 1000
        logging.error("Minimum objective value from optimization = %s", object_value_min)
        object_value_actual = hp_steam_dayprice * hp_steam + electricity_price_ext * electricity_power_ext * 1000
        logging.error("Objective value at the actual operating point = %s", object_value_actual)
df = pd.DataFrame({
# "steam_flow_in_eturb_m1": [eturb_m1.steam_flow_in],
# "hp_steam_machine_opt_eturb_m1": [eturb_m1_hp_steam_machine_opt],
# "steam_flow_side_eturb_m1": [eturb_m1.steam_flow_side],
# "lp_steam_machine_opt_eturb_m1": [eturb_m1_lp_steam_machine_opt],
# "electricity_generation_eturb_m1": [eturb_m1.electricity_generation],
# "electricity_machine_opt_eturb_m1": [eturb_m1_electricity_machine_opt],
# "steam_flow_in_eturb_m2": [eturb_m2.steam_flow_in],
# "hp_steam_machine_opt_eturb_m2": [eturb_m2_hp_steam_machine_opt],
# "steam_flow_side_eturb_m2": [eturb_m2.steam_flow_side],
# "lp_steam_machine_opt_eturb_m2": [eturb_m2_lp_steam_machine_opt],
# "electricity_generation_eturb_m2": [eturb_m2.electricity_generation],
# "electricity_machine_opt_eturb_m2": [eturb_m2_electricity_machine_opt],
# "steam_flow_in_eturb": [eturb_m1.steam_flow_in + eturb_m2.steam_flow_in],
# "hp_steam_machine_opt_eturb": [eturb_m1_hp_steam_machine_opt + eturb_m2_hp_steam_machine_opt],
# "steam_flow_side_eturb": [eturb_m1.steam_flow_side + eturb_m2.steam_flow_side],
# "lp_steam_machine_opt_eturb": [eturb_m1_lp_steam_machine_opt + eturb_m2_lp_steam_machine_opt],
# "electricity_generation_eturb": [eturb_m1.electricity_generation + eturb_m2.electricity_generation],
# "electricity_machine_opt_eturb": [eturb_m1_electricity_machine_opt + eturb_m2_electricity_machine_opt],
# "electricity_power_ext": [electricity_power_ext],
# "electricity_power_ext_opt": [electricity_power_ext_opt],
"object_value_min": [object_value_min],
"object_value_actual": [object_value_actual],
"optim_status": [optim_status],
})
return df
def get_result(data):
final_result = pd.DataFrame()
for i in range(len(data)):
df = turbine_optimizer_main_model(
hp_steam_dayprice = 95.788,
electricity_price_ext = data["electricity_price_ext"].iloc[i],
            # Algorithm 6
steamflow_pred_avg = data["lp_steam_pred_avg"].iloc[i],
electricity_power_pred_avg = data["electricity_power_pred_avg"].iloc[i],
            # Algorithm 8
# steamflow_pred_avg = data["lp_steam_pred_avg_adjust"].iloc[i],
# electricity_power_pred_avg = data["electricity_power_pred_avg_adjust"].iloc[i],
lp_steam_throtte = 0,
steam_flow_in_array = [
data["steam_flow_in_eturb_m1"].iloc[i],
data["steam_flow_in_eturb_m2"].iloc[i],
0
],
steam_flow_side_array = [
data["steam_flow_side_eturb_m1"].iloc[i],
data["steam_flow_side_eturb_m2"].iloc[i]
],
electricity_generation_array = [
data["electricity_generation_eturb_m1"].iloc[i],
data["electricity_generation_eturb_m2"].iloc[i],
0
],
steam_in_upper_limit_array = [90, 90, 75],
steam_in_lower_limit_array = [70, 70, 20],
steam_out_upper_limit_array = [40, 40],
steam_out_lower_limit_array = [15, 15],
electricity_power_ext_max = 8,
electricity_power_ext = data["electricity_power_ext"].iloc[i]
)
final_result = pd.concat([final_result, df], axis = 0)
# final_result["outlet_steam_flow_boiler_m1"] = data["outlet_steam_flow_boiler_m1"].iloc[i]
# final_result["hp_steam_boiler_opt_array_boiler_m1"] = data["hp_steam_boiler_opt_array_boiler_m1"].iloc[i]
# final_result["outlet_steam_flow_boiler_m3"] = data["outlet_steam_flow_boiler_m3"].iloc[i]
# final_result["hp_steam_boiler_opt_array_boiler_m3"] = data["hp_steam_boiler_opt_array_boiler_m3"].iloc[i]
# final_result["outlet_steam_flow_boiler"] = data["outlet_steam_flow_boiler_m1"].iloc[i] + data["outlet_steam_flow_boiler_m3"].iloc[i]
# final_result["hp_steam_boiler_opt_array_boiler"] = data["hp_steam_boiler_opt_array_boiler_m1"].iloc[i] + data["hp_steam_boiler_opt_array_boiler_m3"].iloc[i]
        # Algorithm 6
# final_result["steamflow_pred_avg"] = data["lp_steam_pred_avg"].iloc[i]
# final_result["electricity_power_pred_avg"] = data["electricity_power_pred_avg"].iloc[i]
        # Algorithm 8
# final_result["steamflow_pred_avg"] = data["lp_steam_pred_avg_adjust"].iloc[i]
# final_result["electricity_power_pred_avg"] = data["electricity_power_pred_avg_adjust"].iloc[i]
# ------------
# wsl
# ------------
final_result.to_excel("/mnt/e/dev/data-analysis/turbine_model/result/test.xlsx")
# ------------
# cam
# ------------
# final_result.to_excel("/Users/zfwang/work/dev/data-analysis/turbine_model/result/溢达数据-算法-06.xlsx")
# final_result.to_excel("/Users/zfwang/work/dev/data-analysis/turbine_model/result/溢达数据-算法-08.xlsx")
def unit_test():
""""""
df = turbine_optimizer_main_model(
hp_steam_dayprice = 95.788,
electricity_price_ext = 0.88,
steamflow_pred_avg = 68,
electricity_power_pred_avg = 19.7,
lp_steam_throtte = 0,
steam_flow_in_array = [
63,
47,
40
],
steam_flow_side_array = [
22.36,
0
],
electricity_generation_array = [
9.17,
9.43,
1.8
],
steam_in_upper_limit_array = [90, 90, 90],
steam_in_lower_limit_array = [0, 0, 0],
steam_out_upper_limit_array = [40, 0],
steam_out_lower_limit_array = [0, 0],
electricity_power_ext_max = 8,
electricity_power_ext = 4
)
def get_result_i(data, i):
df = turbine_optimizer_main_model(
hp_steam_dayprice = 95.788,
electricity_price_ext = data["electricity_price_ext"].iloc[i],
        # Algorithm 6
steamflow_pred_avg = data["lp_steam_pred_avg_adjust"].iloc[i],
electricity_power_pred_avg = data["electricity_power_pred_avg_adjust"].iloc[i],
        # Algorithm 8
# steamflow_pred_avg = data["lp_steam_pred_avg_adjust"].iloc[i],
# electricity_power_pred_avg = data["electricity_power_pred_avg_adjust"].iloc[i],
lp_steam_throtte = 0,
steam_flow_in_array = [
data["steam_flow_in_eturb_m1"].iloc[i],
data["steam_flow_in_eturb_m2"].iloc[i],
0
],
steam_flow_side_array = [
data["steam_flow_side_eturb_m1"].iloc[i],
data["steam_flow_side_eturb_m2"].iloc[i]
],
electricity_generation_array = [
data["electricity_generation_eturb_m1"].iloc[i],
data["electricity_generation_eturb_m2"].iloc[i],
0
],
steam_in_upper_limit_array = [90, 90, 75],
steam_in_lower_limit_array = [70, 70, 20],
steam_out_upper_limit_array = [40, 40],
steam_out_lower_limit_array = [15, 15],
electricity_power_ext_max = 8,
electricity_power_ext = data["electricity_power_ext"].iloc[i]
)
if __name__ == "__main__":
# ----------
# wsl
# ----------
# data_1109 = pd.read_csv("/mnt/e/dev/data-analysis/turbine_model/data/1109/result-1109.csv")
# data_1110 = pd.read_csv("/mnt/e/dev/data-analysis/turbine_model/data/1110/result-1110.csv")
data_1109_dropna = | pd.read_csv("/mnt/e/dev/data-analysis/turbine_model/data/1109/result-1109_dropna.csv") | pandas.read_csv |
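    # --- Added explanatory sketch (not part of the original module) ---
    # optimizer(args_obj, args_con, x0) is imported from turbine_optimizer and its body
    # is not shown in this file. Wrappers like it are commonly built around
    # scipy.optimize.minimize with SLSQP and inequality-constraint dicts; a toy,
    # self-contained illustration (placeholder objective and constraint, not the
    # project's real ones):
    #
    #     from scipy.optimize import minimize
    #
    #     def toy_objective(x, steam_price, power_price):
    #         return steam_price * x[0] + power_price * x[1] * 1000
    #
    #     cons = ({'type': 'ineq', 'fun': lambda x: x[0] + x[1] - 10.0},)  # cover total demand
    #     res = minimize(toy_objective, x0=(0.0, 0.0), args=(95.788, 0.88), method='SLSQP',
    #                    constraints=cons, bounds=[(0.0, 8.0), (0.0, None)])
    #     # res.fun, res.success and res.x mirror how `result` is used above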
import os
from glob import glob
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from astropy.io import ascii as ap_ascii
from numpy import array as nparr
from astrobase.services.gaia import objectid_search
from mpl_toolkits.axes_grid1 import make_axes_locatable
from stringcheese import pipeline_utils as pu
def get_reference_data():
#
# Table 4 of Douglas, Curtis et al (2019) Praesepe rotation periods, teffs.
#
# Quoting Curtis+2019: Prot for 743 members were amassed from the literature
# and measured from K2 Campaign 5 light curves by Douglas et al. (2017).
# Douglas et al. (2019) crossmatched this list with DR2 and filtered out stars
# that failed membership, multiplicity, and data quality criteria, leaving us
# with 359 single star members.
#
# And following Douglas+2019 table 4 caption for the flags...
#
praesepe_tab = ap_ascii.read("../data/apjab2468t4_mrt.txt")
sel = (
(praesepe_tab['SFlag'] == 'YYYYY')
|
(praesepe_tab['SFlag'] == 'YYY-Y')
)
praesepe_tab = praesepe_tab[sel]
assert len(praesepe_tab) == 359
praesepe_df = praesepe_tab.to_pandas()
#
# Figure 4 of Curtis+2019. Has a "gold sample" of Pleiades members that
# involved some crossmatching, and removal of binaries. I used WebPlotDigitizer
# to measure the rotation periods from that figure (rather than reproduce the
# actual procedure Jason discusses in the text.)
#
pleiades_df = pd.read_csv('../data/pleaides_prot_vs_teff.csv')
return praesepe_df, pleiades_df
def get_my_data(groupid=113, groupname='nan', classifxndate=20190907,
is_field_star_comparison=False):
#
# for anything flagged manually as good (in other words, the rotation
# period found just from the LS peak was OK), get the rotation period and
# teff from the .results file.
#
if is_field_star_comparison:
fs_str = 'field_star_comparison_'
else:
fs_str = ''
classifixndir = (
'../results/manual_classification/'
'{}_{}group{}_name{}_classification/'.
format(classifxndate, fs_str, groupid, groupname)
)
all_paths = glob(os.path.join(classifixndir,'*.png'))
n_paths = len(all_paths)
gd_paths = glob(os.path.join(classifixndir,'*good*.png'))
gd_sourceids = [
np.int64(os.path.basename(p).split("_")[-1].replace('[good].png',''))
for p in gd_paths
]
if len(gd_sourceids)==0:
raise AssertionError('expected some good sourceids')
# now get the LS results
datadir = (
'../results/pkls_statuses_pages/{}group{}_name{}'.
format(fs_str, groupid, groupname)
)
prots, teffs = [], []
for sourceid in gd_sourceids:
status_file = os.path.join(datadir, str(sourceid),
'GLS_rotation_period.results')
if not os.path.exists(status_file):
raise AssertionError('expected {} to exist'.format(status_file))
d = pu.load_status(status_file)
teffs.append(d['lomb-scargle']['teff'])
prots.append(d['lomb-scargle']['ls_period'])
df = pd.DataFrame(
{'teff': teffs, 'prot': prots, 'source_id':gd_sourceids}
)
return df, n_paths
def plot_prot_vs_teff_singlegroup(classifxndate=20190907, groupid=113,
groupname='nan',
is_field_star_comparison=False,
remove_outliers=False):
praesepe_df, pleiades_df = get_reference_data()
group_df, n_paths = get_my_data(
groupid=groupid,
groupname=groupname,
classifxndate=classifxndate,
is_field_star_comparison=is_field_star_comparison
)
kc19_df = pd.read_csv('../data/string_table2.csv')
if remove_outliers:
# remove outliers manually selected from glue (RVs or HR diagram
# offset)
_hr = pd.read_csv(
'../data/kc19_group{}_table1_hr_diagram_weirdos.csv'.
format(groupid)
)
_rv = pd.read_csv(
'../data/kc19_group{}_table1_rv_weirdos.csv'.
format(groupid)
)
outlier_df = | pd.concat((_hr, _rv)) | pandas.concat |
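        # --- Added note (not in the original source) ---
        # The function continues past this point in the original file; a common next
        # step is to drop the flagged stars from group_df with an anti-join on the
        # Gaia source_id, e.g.
        #     group_df = group_df[~group_df.source_id.isin(outlier_df.source_id)]
        # (the 'source_id' column name in the outlier CSVs is an assumption).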
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import accuracy_score, roc_curve, auc
#constants calculated from eda & feature engineering
lead_time_mean = float(np.load('lead_time_mean.npy'))
potential_issue_probability_matrix = | pd.read_csv('potential_issue_probability_matrix.csv') | pandas.read_csv |
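# --- Added usage sketch (not part of the original script) ---
# A probability matrix like the one loaded above is typically used as a lookup that
# maps a categorical flag to the empirical probability of the target; the column
# names 'potential_issue' and 'probability' below are assumptions:
#
#     prob_map = dict(zip(potential_issue_probability_matrix['potential_issue'],
#                         potential_issue_probability_matrix['probability']))
#     features['potential_issue_prob'] = features['potential_issue'].map(prob_map)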
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
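# --- Added note (not in the original test module) ---
# The three helpers above differ only in the `direction` argument forwarded to
# vbt.Portfolio.from_orders; for example, from_orders_shortonly(size=order_size_one)
# builds a portfolio on `price` that may only open and close short positions.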
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
| pd.Series([1, 100]) | pandas.Series |
from keras.layers import Input, Embedding, LSTM, Dense, concatenate, Bidirectional
from keras.models import Model, Sequential
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, \
confusion_matrix, auc, roc_curve, zero_one_loss, accuracy_score
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
def read_r_object(rdatafile):
# RDatafile is a string: "D:/.../file.RData"
import rpy2.robjects as robjects
# from rpy2.robjects import pandas2ri
robjects.r['load'](rdatafile)
lstm_x_bow = robjects.r['lstm_X_bag_of_words']
x_training_bow = robjects.r['X_training_BOW']
return lstm_x_bow, x_training_bow
def read_data(file):
xbo = pd.read_csv(file, encoding='latin-1')
df = | pd.DataFrame(xbo) | pandas.DataFrame |
"""
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Generic,
Hashable,
Iterator,
Sequence,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
F,
FrameOrSeries,
Shape,
final,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_cast_result_dtype,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCCategoricalIndex
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import (
base,
grouper,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
ensure_index,
)
from pandas.core.internals import ArrayManager
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
"""
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
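    # Illustrative note: the table above is what turns a (kind, how) pair into a cython kernel name,
    # e.g. WrappedCythonOp(kind="aggregate", how="add") resolves to ``libgroupby.group_add`` and
    # WrappedCythonOp(kind="transform", how="cumsum") to ``group_cumsum`` (see ``_get_cython_function``).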
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
elif dtype == object:
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = getattr(libgroupby, f"group_{how}_float64")
return func, values
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "mean", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : intp array, optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: np.ndarray | None = None,
dropna: bool = True,
):
assert isinstance(axis, Index), axis
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: FrameOrSeries, axis: int = 0
) -> Iterator[tuple[Hashable, FrameOrSeries]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="groupby")
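    # From the public API, this iterator is what ultimately backs ``for name, group in obj.groupby(...)``;
    # a hedged sketch of the equivalent user-level loop (``df`` is any DataFrame with a ``key`` column):
    #
    #     for name, group in df.groupby("key"):
    #         ...  # ``name`` is the group key, ``group`` the subsetted frame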
@final
def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned.
"""
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouper
@final
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(comp_ids, ngroups, self.levels, self.codes)
@final
def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
result_values = None
if data.ndim == 2 and any(
isinstance(x, ExtensionArray) for x in data._iter_column_arrays()
):
# calling splitter.fast_apply will raise TypeError via apply_frame_axis0
# if we pass EA instead of ndarray
# TODO: can we have a workaround for EAs backed by ndarray?
pass
elif isinstance(data._mgr, ArrayManager):
# TODO(ArrayManager) don't use fast_apply / libreduction.apply_frame_axis0
# for now -> relies on BlockManager internals
pass
elif (
com.get_callable_name(f) not in base.plotting_methods
and isinstance(splitter, FrameSplitter)
and axis == 0
# fast_apply/libreduction doesn't allow non-numpy backed indexes
and not data.index._has_complex_internals
):
try:
sdata = splitter.sorted_data
result_values, mutated = splitter.fast_apply(f, sdata, group_keys)
except IndexError:
# This is a rare case in which re-running in python-space may
# make a difference, see test_apply_mutate.test_mutate_groups
pass
else:
# If the fast apply path could be used we can return here.
# Otherwise we need to fall back to the slow implementation.
if len(result_values) == len(group_keys):
return group_keys, result_values, mutated
if result_values is None:
# result_values is None if fast apply path wasn't taken
# or fast apply aborted with an unexpected exception.
# In either case, initialize the result list and perform
# the slow iteration.
result_values = []
skip_first = False
else:
# If result_values is not None we're in the case that the
# fast apply loop was broken prematurely but we have
# already the result for the first group which we can reuse.
skip_first = True
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
if skip_first:
# pop the first item from the front of the iterator
next(zipped)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1 and isinstance(
self.result_index, ABCCategoricalIndex
):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroup = self.group_info
if ngroup:
out = np.bincount(ids[ids != -1], minlength=ngroup)
else:
out = []
return Series(out, index=self.result_index, dtype="int64")
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouper for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_codes()
ngroups = len(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
@final
@cache_readonly
def codes_info(self) -> np.ndarray:
# return the codes of items in original grouped axis
codes, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((codes, self.indexer))
codes = codes[sorter]
return codes
@final
def _get_compressed_codes(self) -> tuple[np.ndarray, np.ndarray]:
all_codes = self.codes
if len(all_codes) > 1:
group_index = get_group_index(all_codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.codes, np.arange(len(ping.group_index))
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def reconstructed_codes(self) -> list[np.ndarray]:
codes = self.codes
comp_ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(comp_ids, obs_ids, self.shape, codes, xnull=True)
@cache_readonly
def result_index(self) -> Index:
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def get_group_levels(self) -> list[Index]:
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].result_index]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.result_index.take(codes)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
@final
def _ea_wrap_cython_operation(
self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate.
"""
# TODO: general case implementation overridable by EAs.
orig_values = values
if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
values = values.view("M8[ns]")
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
if how in ["rank"]:
# preserve float64 dtype
return res_values
res_values = res_values.astype("i8", copy=False)
result = type(orig_values)(res_values, dtype=orig_values.dtype)
return result
elif is_integer_dtype(values.dtype) or is_bool_dtype(values.dtype):
# IntegerArray or BooleanArray
values = values.to_numpy("float64", na_value=np.nan)
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
dtype = maybe_cast_result_dtype(orig_values.dtype, how)
if isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
return res_values
elif | is_float_dtype(values.dtype) | pandas.core.dtypes.common.is_float_dtype |
import sys
import dask
import dask.dataframe as dd
from distributed import Executor
from distributed.utils_test import cluster, loop, gen_cluster
from distributed.collections import (_futures_to_dask_dataframe,
futures_to_dask_dataframe, _futures_to_dask_array,
futures_to_dask_array, _stack, stack)
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from toolz import identity
from tornado import gen
from tornado.ioloop import IOLoop
dfs = [pd.DataFrame({'x': [1, 2, 3]}, index=[0, 10, 20]),
pd.DataFrame({'x': [4, 5, 6]}, index=[30, 40, 50]),
pd.DataFrame({'x': [7, 8, 9]}, index=[60, 70, 80])]
def assert_equal(a, b):
assert type(a) == type(b)
if isinstance(a, pd.DataFrame):
tm.assert_frame_equal(a, b)
elif isinstance(a, pd.Series):
tm.assert_series_equal(a, b)
elif isinstance(a, pd.Index):
| tm.assert_index_equal(a, b) | pandas.util.testing.assert_index_equal |
import datetime
from abc import abstractmethod, ABC
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
from minotor.constants import DATETIME_ID_FORMAT
from minotor.data_managers.data_types import DataType
class PreprocessorABC(ABC):
def __init__(self):
self.current_ids = None
self.current_dates = None
def preprocess(self, data) -> List[Tuple[str, Dict, str]]:
"""
        :param data: data in the input format
        :return: a list corresponding to the features, under the format: [(feature_name, values, data_type)]
"""
self.current_ids, self.current_dates = self.get_values_infos(data)
return self.get_preprocessed_data(data)
@abstractmethod
def get_values_infos(self, data) -> Tuple[List, List]:
"""
        :param data: data in the input format
        :return: value ids and value dates
"""
pass
@abstractmethod
def get_preprocessed_data(self, data) -> List[Tuple[str, Dict, str]]:
pass
class NumpyArrayPreprocessor(PreprocessorABC):
def get_values_infos(self, data: np.ndarray) -> Tuple[List, List]:
current_date = datetime.datetime.now()
return [f"{current_date.strftime(DATETIME_ID_FORMAT)}-{i}" for i in range(data.shape[0])], \
[current_date for _ in range(data.shape[0])]
def get_preprocessed_data(self, data: np.ndarray) -> List[Tuple[str, Dict, DataType]]:
data = data.transpose()
return [(f"feature_{i}",
{key: val for key, val in zip(self.current_ids, _replace_nan_with_none(feature_data))},
DataType.type2value(feature_data.dtype))
for i, feature_data in enumerate(data)]
class PandasDataFramePreprocessor(PreprocessorABC):
def get_values_infos(self, data: pd.DataFrame) -> Tuple[List, List]:
current_date = datetime.datetime.now()
return [f"{current_date.strftime(DATETIME_ID_FORMAT)}-{id}" for id in data.index], \
[current_date for _ in range(len(data))]
def get_preprocessed_data(self, data: pd.DataFrame) -> List[Tuple[str, Dict, DataType]]:
return [(col,
{key: val for key, val in zip(self.current_ids, _replace_nan_with_none(data[col].values))},
DataType.type2value(data[col].dtype))
for col in data]
type2preprocessor = {
np.ndarray: NumpyArrayPreprocessor(),
pd.DataFrame: PandasDataFramePreprocessor()
}
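# A minimal usage sketch (illustrative only; ``data`` must be one of the types registered above):
#
#     data = np.array([[1.0, 2.0], [3.0, np.nan]])
#     preprocessor = type2preprocessor[type(data)]
#     features = preprocessor.preprocess(data)  # [(feature_name, {value_id: value}, DataType), ...]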
def _replace_nan_with_none(array: np.ndarray) -> List:
return [x if not | pd.isna(x) | pandas.isna |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.dataframe.datasource.dataframe import from_pandas
from mars.dataframe.datasource.series import from_pandas as series_from_pandas
from mars.dataframe.merge import concat
from mars.dataframe.utils import sort_dataframe_inplace
def test_merge(setup):
df1 = pd.DataFrame(np.arange(20).reshape((4, 5)) + 1, columns=['a', 'b', 'c', 'd', 'e'])
df2 = pd.DataFrame(np.arange(20).reshape((5, 4)) + 1, columns=['a', 'b', 'x', 'y'])
df3 = df1.copy()
df3.index = pd.RangeIndex(2, 6, name='index')
df4 = df1.copy()
df4.index = pd.MultiIndex.from_tuples([(i, i + 1) for i in range(4)], names=['i1', 'i2'])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
mdf3 = from_pandas(df3, chunk_size=3)
mdf4 = from_pandas(df4, chunk_size=2)
# Note [Index of Merge]
#
    # When `left_index` and `right_index` of `merge` are both false, pandas will generate a RangeIndex for
    # the final result dataframe.
    #
    # We chunked the `left` and `right` dataframes, thus every result chunk will have its own RangeIndex.
    # When they are concatenated we don't generate a new RangeIndex for the result, thus we cannot obtain the
    # same index values as pandas. But we guarantee that the content of the dataframe is correct.
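    # A hedged sketch of a purely content-based comparison (``result``/``expected`` are placeholders;
    # the assertions in this test use ``sort_dataframe_inplace`` from mars.dataframe.utils instead):
    #
    #     pd.testing.assert_frame_equal(result.sort_values('a').reset_index(drop=True),
    #                                   expected.sort_values('a').reset_index(drop=True))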
# merge on index
expected0 = df1.merge(df2)
jdf0 = mdf1.merge(mdf2)
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
# merge on left index and `right_on`
expected1 = df1.merge(df2, how='left', right_on='x', left_index=True)
jdf1 = mdf1.merge(mdf2, how='left', right_on='x', left_index=True)
result1 = jdf1.execute().fetch()
expected1.set_index('a_x', inplace=True)
result1.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1, 0), sort_dataframe_inplace(result1, 0))
# merge on `left_on` and right index
expected2 = df1.merge(df2, how='right', left_on='a', right_index=True)
jdf2 = mdf1.merge(mdf2, how='right', left_on='a', right_index=True)
result2 = jdf2.execute().fetch()
expected2.set_index('a', inplace=True)
result2.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
# merge on `left_on` and `right_on`
expected3 = df1.merge(df2, how='left', left_on='a', right_on='x')
jdf3 = mdf1.merge(mdf2, how='left', left_on='a', right_on='x')
result3 = jdf3.execute().fetch()
expected3.set_index('a_x', inplace=True)
result3.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
# merge on `on`
expected4 = df1.merge(df2, how='right', on='a')
jdf4 = mdf1.merge(mdf2, how='right', on='a')
result4 = jdf4.execute().fetch()
expected4.set_index('a', inplace=True)
result4.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
# merge on multiple columns
expected5 = df1.merge(df2, how='inner', on=['a', 'b'])
jdf5 = mdf1.merge(mdf2, how='inner', on=['a', 'b'])
result5 = jdf5.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected5, 0), sort_dataframe_inplace(result5, 0))
# merge when some on is index
expected6 = df3.merge(df2, how='inner', left_on='index', right_on='a')
jdf6 = mdf3.merge(mdf2, how='inner', left_on='index', right_on='a')
result6 = jdf6.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected6, 0), sort_dataframe_inplace(result6, 0))
# merge when on is in MultiIndex
expected7 = df4.merge(df2, how='inner', left_on='i1', right_on='a')
jdf7 = mdf4.merge(mdf2, how='inner', left_on='i1', right_on='a')
result7 = jdf7.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected7, 0), sort_dataframe_inplace(result7, 0))
# merge when on is in MultiIndex, and on not in index
expected8 = df4.merge(df2, how='inner', on=['a', 'b'])
jdf8 = mdf4.merge(mdf2, how='inner', on=['a', 'b'])
result8 = jdf8.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected8, 0), sort_dataframe_inplace(result8, 0))
def test_join(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], index=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
# default `how`
expected0 = df1.join(df2, lsuffix='l_', rsuffix='r_')
jdf0 = mdf1.join(mdf2, lsuffix='l_', rsuffix='r_')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(expected0.sort_index(), result0.sort_index())
# how = 'left'
expected1 = df1.join(df2, how='left', lsuffix='l_', rsuffix='r_')
jdf1 = mdf1.join(mdf2, how='left', lsuffix='l_', rsuffix='r_')
result1 = jdf1.execute().fetch()
pd.testing.assert_frame_equal(expected1.sort_index(), result1.sort_index())
# how = 'right'
expected2 = df1.join(df2, how='right', lsuffix='l_', rsuffix='r_')
jdf2 = mdf1.join(mdf2, how='right', lsuffix='l_', rsuffix='r_')
result2 = jdf2.execute().fetch()
pd.testing.assert_frame_equal(expected2.sort_index(), result2.sort_index())
# how = 'inner'
expected3 = df1.join(df2, how='inner', lsuffix='l_', rsuffix='r_')
jdf3 = mdf1.join(mdf2, how='inner', lsuffix='l_', rsuffix='r_')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(expected3.sort_index(), result3.sort_index())
# how = 'outer'
expected4 = df1.join(df2, how='outer', lsuffix='l_', rsuffix='r_')
jdf4 = mdf1.join(mdf2, how='outer', lsuffix='l_', rsuffix='r_')
result4 = jdf4.execute().fetch()
pd.testing.assert_frame_equal(expected4.sort_index(), result4.sort_index())
def test_join_on(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], columns=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], columns=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
expected0 = df1.join(df2, on=None, lsuffix='_l', rsuffix='_r')
jdf0 = mdf1.join(mdf2, on=None, lsuffix='_l', rsuffix='_r')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
expected1 = df1.join(df2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
jdf1 = mdf1.join(mdf2, how='left', on='a1', lsuffix='_l', rsuffix='_r')
result1 = jdf1.execute().fetch()
# Note [Columns of Left Join]
#
    # I believe we have no chance to obtain exactly the same result as pandas here:
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 0 1 3 3
# >>> df2
# a1 b2 b3
# 1 2 6 7
# >>> df3
# a1 b2 b3
# 1 2 6 7
# 1 2 6 7
#
# >>> df1.merge(df2, how='left', left_on='a1', left_index=False, right_index=True)
# a1_x a2 a3 a1_y b2 b3
# 0 1 3 3 2 6 7
# >>> df1.merge(df3, how='left', left_on='a1', left_index=False, right_index=True)
# a1 a1_x a2 a3 a1_y b2 b3
# 0 1 1 3 3 2 6 7
# 0 1 1 3 3 2 6 7
#
# Note that the result of `df1.merge(df3)` has an extra column `a` compared to `df1.merge(df2)`.
    # The value of column `a` is the same as `a1_x`, just because `1` occurs twice in the index of `df3`.
    # I haven't investigated why pandas has such behaviour...
#
    # We cannot yield the same result as pandas because `df3` is chunked: some of the
    # result chunks have 6 columns while others may have 7 columns, and when concatenated into one DataFrame
    # some cells of column `a` will have the value `NaN`, which is different from the result of pandas.
    #
    # But we can guarantee that the other effective columns have exactly the same values as pandas.
columns_to_compare = jdf1.columns_value.to_pandas()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1[columns_to_compare], 0, 1),
sort_dataframe_inplace(result1[columns_to_compare], 0, 1))
# Note [Index of Join on EmptyDataFrame]
#
    # It is tricky and non-trivial to get the same `index` result as pandas.
#
# Look at the following example:
#
# >>> df1
# a1 a2 a3
# 1 4 2 6
# >>> df2
# a1 b2 b3
# 1 2 6 7
# 2 8 9 10
# >>> df3
# Empty DataFrame
# Columns: [a1, a2, a3]
# Index: []
# >>> df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1.0 4.0 2 6.0 8 9 10
# NaN NaN 1 NaN 2 6 7
# >>> df3.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
# a1_l a2 a3 a1_r b2 b3
# 1 NaN 1 NaN 2 6 7
# 2 NaN 2 NaN 8 9 10
#
# When the `left` dataframe is not empty, the mismatched rows in `right` will have index value `NaN`,
# and the matched rows have index value from `right`. When the `left` dataframe is empty, the mismatched
# rows have index value from `right`.
#
    # Since we chunked the `left` dataframe, it is not easy to obtain the same index values as pandas in the
    # final result dataframe, but we guarantee that the dataframe content is correct.
expected2 = df1.join(df2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
jdf2 = mdf1.join(mdf2, how='right', on='a2', lsuffix='_l', rsuffix='_r')
result2 = jdf2.execute().fetch()
expected2.set_index('a2', inplace=True)
result2.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
expected3 = df1.join(df2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
jdf3 = mdf1.join(mdf2, how='inner', on='a2', lsuffix='_l', rsuffix='_r')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
expected4 = df1.join(df2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
jdf4 = mdf1.join(mdf2, how='outer', on='a2', lsuffix='_l', rsuffix='_r')
result4 = jdf4.execute().fetch()
expected4.set_index('a2', inplace=True)
result4.set_index('a2', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
def test_merge_one_chunk(setup):
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]}, index=['a1', 'a2', 'a3', 'a4'])
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
# all have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# left have one chunk
mdf1 = from_pandas(df1)
mdf2 = from_pandas(df2, chunk_size=2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
# right have one chunk
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2)
expected = df1.merge(df2, left_on='lkey', right_on='rkey')
jdf = mdf1.merge(mdf2, left_on='lkey', right_on='rkey')
result = jdf.execute().fetch()
pd.testing.assert_frame_equal(expected.sort_values(by=expected.columns[1]).reset_index(drop=True),
result.sort_values(by=result.columns[1]).reset_index(drop=True))
def test_merge_on_duplicate_columns(setup):
raw1 = pd.DataFrame([['foo', 1, 'bar'],
['bar', 2, 'foo'],
['baz', 3, 'foo']],
columns=['lkey', 'value', 'value'],
index=['a1', 'a2', 'a3'])
raw2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['a1', 'a2', 'a3', 'a4'])
df1 = from_pandas(raw1, chunk_size=2)
df2 = from_pandas(raw2, chunk_size=3)
r = df1.merge(df2, left_on='lkey', right_on='rkey')
result = r.execute().fetch()
expected = raw1.merge(raw2, left_on='lkey', right_on='rkey')
pd.testing.assert_frame_equal(expected, result)
def test_append_execution(setup):
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=2)
adf = mdf1.append(mdf2)
expected = df1.append(df2)
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(mdf2, ignore_index=True)
expected = df1.append(df2, ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
df3 = pd.DataFrame(np.random.rand(8, 4), columns=list('ABCD'))
mdf3 = from_pandas(df3, chunk_size=3)
expected = df1.append([df2, df3])
adf = mdf1.append([mdf2, mdf3])
result = adf.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
adf = mdf1.append(dict(A=1, B=2, C=3, D=4), ignore_index=True)
expected = df1.append(dict(A=1, B=2, C=3, D=4), ignore_index=True)
result = adf.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
# test for series
series1 = pd.Series(np.random.rand(10,))
series2 = pd.Series(np.random.rand(10,))
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=3)
aseries = mseries1.append(mseries2)
expected = series1.append(series2)
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
aseries = mseries1.append(mseries2, ignore_index=True)
expected = series1.append(series2, ignore_index=True)
result = aseries.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_series_equal(expected, result)
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=2)
aseries = mseries1.append(mseries2)
expected = series1.append(series2)
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
aseries = mseries1.append(mseries2, ignore_index=True)
expected = series1.append(series2, ignore_index=True)
result = aseries.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_series_equal(expected, result)
series3 = pd.Series(np.random.rand(4,))
mseries3 = series_from_pandas(series3, chunk_size=2)
expected = series1.append([series2, series3])
aseries = mseries1.append([mseries2, mseries3])
result = aseries.execute().fetch()
pd.testing.assert_series_equal(expected, result)
def test_concat(setup):
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
r = concat([mdf1, mdf2])
expected = pd.concat([df1, df2])
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
# test different chunk size and ignore_index=True
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=3)
r = concat([mdf1, mdf2], ignore_index=True)
expected = pd.concat([df1, df2], ignore_index=True)
result = r.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_frame_equal(expected, result)
# test axis=1
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=3)
r = concat([mdf1, mdf2], axis=1)
expected = pd.concat([df1, df2], axis=1)
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
# test multiply dataframes
r = concat([mdf1, mdf2, mdf1])
expected = pd.concat([df1, df2, df1])
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('ABCD'))
df2 = pd.DataFrame(np.random.rand(10, 3), columns=list('ABC'))
mdf1 = from_pandas(df1, chunk_size=3)
mdf2 = from_pandas(df2, chunk_size=3)
# test join=inner
r = concat([mdf1, mdf2], join='inner')
expected = pd.concat([df1, df2], join='inner')
result = r.execute().fetch()
pd.testing.assert_frame_equal(expected, result)
# test for series
series1 = pd.Series(np.random.rand(10,))
series2 = pd.Series(np.random.rand(10,))
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=3)
r = concat([mseries1, mseries2])
expected = pd.concat([series1, series2])
result = r.execute().fetch()
pd.testing.assert_series_equal(result, expected)
# test different series and ignore_index
mseries1 = series_from_pandas(series1, chunk_size=4)
mseries2 = series_from_pandas(series2, chunk_size=3)
r = concat([mseries1, mseries2], ignore_index=True)
expected = pd.concat([series1, series2], ignore_index=True)
result = r.execute(extra_config={'check_index_value': False}).fetch()
pd.testing.assert_series_equal(result, expected)
# test axis=1
mseries1 = series_from_pandas(series1, chunk_size=3)
mseries2 = series_from_pandas(series2, chunk_size=3)
r = concat([mseries1, mseries2], axis=1)
expected = pd.concat([series1, series2], axis=1)
result = r.execute(extra_config={'check_shape': False}).fetch()
pd.testing.assert_frame_equal(result, expected)
# test merge dataframe and series
r = concat([mdf1, mseries2], ignore_index=True)
expected = pd.concat([df1, series2], ignore_index=True)
result = r.execute(extra_config={'check_index_value': False}).fetch()
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |