# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
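# The concrete test classes below reuse Base as a mixin: each one sets ``_holder``
# to the index class under test and implements ``create_index``, and Base then
# exercises pickling, disallowed numeric ops, boolean-context errors, and
# ndarray-compat properties against that instance. A minimal sketch of the
# contract (illustrative only; ``SomeIndex`` is a placeholder, not a real class):
#
#     class TestSomeIndex(Base, tm.TestCase):
#         _holder = SomeIndex
#         def create_index(self):
#             return SomeIndex([...])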
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# a scalar raises TypeError
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
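    def _example_set_names_usage(self):
        # Illustrative sketch added for exposition (not a collected test): the
        # set_* methods return a new MultiIndex unless inplace=True, which is
        # exactly what the assertions above rely on.
        mi = MultiIndex.from_tuples([('A', 1), ('A', 2)], names=['letter', 'num'])
        renamed = mi.set_names(['L', 'N'])
        assert list(mi.names) == ['letter', 'num']   # original left untouched
        assert list(renamed.names) == ['L', 'N']
        assert list(mi.set_names('X', level=0).names) == ['X', 'num']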
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
        # levels should be (at least) shallow copied
assert_copy(copy.levels, original.levels)
        assert_almost_equal(copy.labels, original.labels)
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is set up using the script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
        f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
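    # Worked example (added for illustration): radius1=3.0 and radius2=2.0 give
    # ec = sqrt(1 - 2**2/3**2) ~= 0.745 and mdist = alen * ec ~= 2.24, i.e. the
    # distance from the centre of the search ellipse to one of its foci.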
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
        AA_data = pd.read_csv('library/avprofiles/him_av.csv')
# <NAME>
# <EMAIL>
import copy
import pandas as pd
from recentroid.recentroid import Recentroid
import scipy.ndimage
from astropy.io import fits
import numpy as np
class ImageAlign:
"""
ImageAlign is a class to facilitate aligning two images by matching centroids of sources in pixel (x,y) coordinate.
+ How to use:
- Instantiate ImageAlign object.
- Call public methods to compute outputs.
- Call save() to save outputs.
+ Attributes:
- self.image1, self.image2 = 2D array of images
        - self.source1, self.source2 = a dict with each key as a source name and value as a dict with keys = {'X','Y'} and values as the pixel coordinate (x,y) for each corresponding key.
            > source1 and source2 must run in parallel.
> source1 and source2 are recorded as they are regardless of index scheme.
> They will be changed to zero-indexing scheme after any calling method such as compute_to_zero_index or compute_recentroid.
- self.source_zero_indexing = boolean. True if the source1 and source2 as inputs are in zero-indexing scheme (i.e., python standard). False if they are one-indexing scheme (e.g., DS9 standard).
> If any call changes source1 and source2 to zero-indexing scheme, source_zero_indexing would also be updated to True.
- self.source_shift = simply source1 - source2 for the shift in (x,y) coordinate available after running self.compute_shift()
- self.shift = shift values in pixel unit available after running self.compute_shift()
+ Methods (public):
        - self.compute_to_zero_index() = simply converts source1 and source2 from one-indexing to zero-indexing by (x,y) -= (1.,1.).
> This also updates self.source_zero_indexing to True.
> Calling compute_recentroid() also implements compute_to_zero_index().
- self.compute_recentroid(box_size,centroid_func) = use the initial source1 and source2, and re-centroid. New centroids will replace source1 and source2, and source_zero_indexing sets to True.
> Package Recentroid (pip install recentroid) is used in this step.
> box_size = a search area for a new centroid specified by a square of size box_size centered on the initial centroid.
> centroid_func = centroid function. See the package Recentroid (pip install recentroid) for the support.
+ If None, centroid_func = photutils.centroid_2dg
- self.compute_shift() = use source1 and source2 to compute shifts in pixel unit.
> self.source_shift = source1 - source2
> self.shift = mean and std from source_shift
- self.make_shift_image(shift_more, order, fill_value) = use self.shift and shift_more to shift image2. Output as self.image2_shifted.
> Package scipy.ndimage.shift is used in this process.
> shift_more = a tuple (x,y) specifying arbitrary shift in addition to self.shift: image2_shifted = image2 + self.shift + shift_more.
> order = integer for spline interpolation
> fill_value = float to be assigned to any none finite value in image2 before shift.
> self.image2_shifted is the output.
- self.save(container) = save all outputs as name convention ./savefolder/saveprefix_savesuffix.extension where savefolder and saveprefix are specified by Container class.
> container = imagealign.container.Container class
> Other switches include save_zero_indexing, save_recentroid, save_shift, save_shifted_image, save_shifted_image_overwrite.
> Outputs include
+ recentroid1.reg, recentroid2.reg = new centroids from source1 and source2 respectively. Each file has two columns of (x,y) coordinate with space separation.
+ shift.csv = self.shift in csv format
+ shifted.fits = fits file with self.image2_shifted in EXT1 with EXT0 as an empty primary HDU.
+ Methods (private):
- self._to_zero_index() = converting one-indexing to zero-indexing schemes, and also updating source1, source2, and source_zero_indexing = True.
> simply performing x_zero_index = x_one_index - 1., and vice versa for y.
"""
def __init__(self,image1,image2,source1,source2,source_zero_indexing=True):
self.image1 = image1
self.image2 = image2
self.source1 = source1
self.source2 = source2
self.source_zero_indexing = source_zero_indexing
def compute_recentroid(self,box_size=10,centroid_func=None):
# update to zero indexing scheme
if not self.source_zero_indexing:
self._to_zero_index()
#####
t1 = Recentroid(self.source1,self.image1,box_size,centroid_func)
t1.compute()
t2 = Recentroid(self.source2,self.image2,box_size,centroid_func)
t2.compute()
#####
self.source1 = t1.source_table_new.T.to_dict()
self.source2 = t2.source_table_new.T.to_dict()
print('Recentroid source1 and source2.')
def compute_shift(self):
# update to zero indexing scheme
if not self.source_zero_indexing:
self._to_zero_index()
##### self.source_shift
        source1 = pd.DataFrame(self.source1)
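# --- Usage sketch based on the class docstring above (added for exposition).
# The FITS file names and source coordinates are made up; only the method names
# and their defaults come from the class itself.
def _example_image_align():
    img1 = fits.getdata('epoch1.fits')      # hypothetical input images
    img2 = fits.getdata('epoch2.fits')
    sources1 = {'star1': {'X': 101.2, 'Y': 55.7}}
    sources2 = {'star1': {'X': 103.9, 'Y': 54.1}}
    aligner = ImageAlign(img1, img2, sources1, sources2,
                         source_zero_indexing=False)
    aligner.compute_recentroid(box_size=10)  # refine centroids, zero-index them
    aligner.compute_shift()                  # fills aligner.source_shift / aligner.shift
    return aligner.shift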
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
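# The expected keys above follow kartothek's storage layout: one
# "<dataset_uuid>.by-dataset-metadata.json" entry for the dataset, one
# "<dataset_uuid>/<table>/_common_metadata" file per table, and one
# "<dataset_uuid>/<table>/<partition_label>.parquet" file per partition.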
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
    Pandas seems to stop evaluating the groupby expression if the dataframe after the first column split
    has length 1. This appears to be an optimization which should, however, still raise a KeyError.
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
    pdt.assert_frame_equal(df_helper, df_stored)
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from datetime import date, timedelta
import gc
def load_data():
# df_train = pd.read_feather('train_after1608_raw')
df_train = pd.read_csv('train.csv', usecols=[1, 2, 3, 4, 5], dtype={'onpromotion': bool},
converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0},
parse_dates=["date"])
df_test = pd.read_csv("test.csv", usecols=[0, 1, 2, 3, 4], dtype={'onpromotion': bool},
parse_dates=["date"]).set_index(['store_nbr', 'item_nbr', 'date'])
# subset data
df_2017 = df_train.loc[df_train.date>=pd.datetime(2016,1,1)]
# promo
promo_2017_train = df_2017.set_index(
["store_nbr", "item_nbr", "date"])[["onpromotion"]].unstack(
level=-1).fillna(False)
promo_2017_train.columns = promo_2017_train.columns.get_level_values(1)
promo_2017_test = df_test[["onpromotion"]].unstack(level=-1).fillna(False)
promo_2017_test.columns = promo_2017_test.columns.get_level_values(1)
promo_2017_test = promo_2017_test.reindex(promo_2017_train.index).fillna(False)
promo_2017 = pd.concat([promo_2017_train, promo_2017_test], axis=1)
del promo_2017_test, promo_2017_train
df_2017 = df_2017.set_index(
["store_nbr", "item_nbr", "date"])[["unit_sales"]].unstack(
level=-1).fillna(0)
df_2017.columns = df_2017.columns.get_level_values(1)
# items
items = pd.read_csv("items.csv").set_index("item_nbr")
stores = pd.read_csv("stores.csv").set_index("store_nbr")
# items = items.reindex(df_2017.index.get_level_values(1))
return df_2017, promo_2017, items, stores
def save_unstack(df, promo, filename):
df_name, promo_name = 'df_' + filename + '_raw', 'promo_' + filename + '_raw'
df.columns = df.columns.astype('str')
df.reset_index().to_feather(df_name)
promo.columns = promo.columns.astype('str')
promo.reset_index().to_feather(promo_name)
def load_unstack(filename):
df_name, promo_name = 'df_' + filename + '_raw', 'promo_' + filename + '_raw'
df_2017 = pd.read_feather(df_name).set_index(['store_nbr','item_nbr'])
df_2017.columns = pd.to_datetime(df_2017.columns)
promo_2017 = pd.read_feather(promo_name).set_index(['store_nbr','item_nbr'])
promo_2017.columns = pd.to_datetime(promo_2017.columns)
items = pd.read_csv("items.csv").set_index("item_nbr")
    stores = pd.read_csv("stores.csv").set_index("store_nbr")
    return df_2017, promo_2017, items, stores
"""Pandas utilities for DataFrames, Series, etc. """
import pandas as pd
import numpy as np
from bisect import bisect_left
import datetime
import os
import re
from .utils import gen_filename
from .data_uri import bytes_to_uri
from .ipython import HTML
# ---------------------------------------------------------------------------
def tabulate_list(lst):
"""Show list content as unescaped HTML table."""
return HTML(
pd.DataFrame(lst)
.style
.hide_index()
.set_table_styles([{'selector': 'thead',
'props': [('display', 'none')]}])
.render(escape=False))
def markdown_to_html(non_p_string) -> str:
''' Strip enclosing paragraph marks, <p> ... </p>,
which markdown() forces, and which interfere with some jinja2 layout
'''
from markdown import markdown as markdown_to_html_with_p
return re.sub("(^<P>|</P>$)", "", markdown_to_html_with_p(non_p_string), flags=re.IGNORECASE)
# ---------------------------------------------------------------------------
# dataframe to SVG conversion via command line tools
def make_table_html(df, title=''):
'''
Write an entire dataframe to an HTML string with nice formatting.
'''
result = '''
<html>
<head>
<style>
h2 {
text-align: center;
font-family: sans-serif;
}
table {
margin-left: auto;
margin-right: auto;
}
table, th, td {
#font-family: sans-serif;
#border: 1px solid black;
#border-collapse: collapse;
}
th, td {
text-align: left;
font-family: monospace;
font-size:10;
                padding: 0 2px;
            }
            thead tr {
font-family: sans-serif;
text-align: center;
}
.wide {
width: 90%;
}
</style>
</head>
<body>
'''
#-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Oxygen-Sans,Ubuntu,Cantarell,"Helvetica Neue", sans-serif
result += '<h2> %s </h2>' % title
if type(df) == pd.io.formats.style.Styler:
result += df.render()
else:
result += df.to_html(classes='wide', escape=False)
result += '''
</body>
</html>
'''
return result
def write_to_html_file(df_sl, filename='out.html'):
result = make_table_html(df_sl)
with open(filename, 'w', encoding='utf-8') as f:
f.write(result)
def make_df_svg_uri(df_sl, fnhead, show_errors=False,
do_remove_files=True,
do_optimize_svg=False):
"""Display a dataframe as formatted HTML and capture as SVG in URI form.
The URI can be used in an <img> tag to display the dataframe as SVG.
"""
fnbase = f"{fnhead}_{gen_filename()}"
outfile = f'{fnbase}.html'
pdffile = f'{fnbase}.pdf'
svgfile = f'{fnbase}.svg'
write_to_html_file(df_sl, filename=outfile)
#--disable-smart-shrinking
# --page-width 8in --page-height 11in\
if show_errors:
debug_err = ''
debug_std = ''
else:
debug_err = '2> /dev/null'
debug_std = '> /dev/null'
os.system(f"wkhtmltopdf --dpi 120 -T 0 -B 0 -L 0 -R 0 --encoding utf-8 --custom-header 'meta' 'charset=utf-8' "
f"{outfile} {pdffile} {debug_err}")
os.system(f"pdfcrop {pdffile} {debug_std}")
#os.system(f"inkscape -l {svgfile} --export-area-drawing --vacuum-defs {pdffile}")
os.system(f"inkscape {pdffile.replace('.pdf','-crop.pdf')} --vacuum-defs --export-filename={svgfile} {debug_err}")
#os.system(f"inkscape {pdffile} --vacuum-defs --export-filename={svgfile} {debug_err}")
# os.system(f"pdf2svg {pdffile.replace('.pdf','-crop.pdf')} {svgfile}")
# os.system("sleep .5")
if do_optimize_svg:
os.system(f"svgo {svgfile} {debug_std}")
dat_uri = bytes_to_uri(open(svgfile,'rb').read(), imgtype='svg+xml')
if do_remove_files:
os.system(f"rm -f {fnbase}*")
return dat_uri
def dataframe_svg_html(df_sl, width="90%"):
dat_uri = make_df_svg_uri(df_sl, fnhead='sl_table')
return HTML(f"<img src='{dat_uri}' width={width}/>")
def hide_repeated_cells(x):
"""Hide values that are the same as the row above"""
c1='visibility:hidden'
c2=''
cond = x.iloc[:-1,:].values == x.iloc[1:,:].values
cr = cond[0].copy()
cr[:] = False
cond = np.vstack([cr,cond])
df1 = pd.DataFrame(np.where(cond,c1,c2),columns=x.columns,index=x.index)
return df1
#----------------------------------------------------------------------------
def index_columns(df, none_name=None):
"""Return list of column names that form the (multi-)index or None, if index is a single unnamed
column."""
try:
return [l.name for l in df.index.levels]
except AttributeError:
name = df.index.name
if name is not None:
return [name]
elif none_name is not None:
return [none_name]
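# Minimal usage sketch for index_columns (added for illustration; the frames
# below are made up):
def _example_index_columns():
    named = pd.DataFrame({'v': [1]}, index=pd.Index(['a'], name='key'))
    unnamed = pd.DataFrame({'v': [1]})
    assert index_columns(named) == ['key']
    assert index_columns(unnamed) is None
    assert index_columns(unnamed, none_name='index') == ['index']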
def split_by_index(df, split_idx):
"""
Split DataFrame df at or around the given index value
:param df: DataFrame with a sorted index
:type df: pandas.DataFrame
:param split_idx: The index value to split df by.
:type split_idx: int
:return: - **low_df** (`pandas.DataFrame`) - DataFrame containing index values below split_idx
- **high_df** (`pandas.DataFrame`) - DataFrame containing index values greater than or
equal to split_idx.
"""
try:
idx = df.index.get_loc(split_idx)
except KeyError:
idx = bisect_left(df.index, split_idx)
return df.iloc[:idx, :], df.iloc[idx:, :]
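# Minimal usage sketch for split_by_index (added for illustration; data made up):
def _example_split_by_index():
    frame = pd.DataFrame({'v': range(5)}, index=[1, 2, 3, 4, 5])
    low, high = split_by_index(frame, 3)
    assert list(low.index) == [1, 2] and list(high.index) == [3, 4, 5]
    return low, high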
def update_on(df, dfu, on=None):
"""Use DataFrame.update() function inplace, matching on any set of columns."""
if on:
inames = index_columns(df)
uinames = index_columns(dfu)
df.reset_index(inplace=True)
df.set_index(on, inplace=True)
if uinames is not None:
df.update(dfu.reset_index().set_index(on))
else:
# id dfu index is unnamed, drop it to avoid collision with df index
df.update(dfu.set_index(on))
if inames is None:
df.reset_index(inplace=True)
df.set_index('index', inplace=True)
df.index.name = None
else:
df.reset_index(inplace=True)
df.set_index('index', inplace=True)
else:
df.update(dfu)
def dataframe_schema(columns, dtypes):
"""Create empty pd.DataFrame with columns of given datatypes"""
df_dict = {cname: pd.Series([], dtype=dtype) for cname, dtype in zip(columns, dtypes)}
return pd.DataFrame(df_dict)
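# Illustration (not in the original module): build an empty frame with fixed dtypes.
def _example_dataframe_schema():
    empty = dataframe_schema(['name', 'count'], ['object', 'int64'])
    assert list(empty.columns) == ['name', 'count'] and len(empty) == 0
    return empty.dtypes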
def remove_microsecond(ts):
return pd.Timestamp(year=ts.year, month=ts.month, day=ts.day, hour=ts.hour, second=ts.second)
def get_next_index(df, index_val, lock_bound=False, inc=+1):
"""Determine the index value that follows `index_val`
:param df: dataframe or series, having df.index.
:type df: pd.DataFrame or pd.Series
:param index_val: index value to start from
:param lock_bound: if true return same index if reaching bounds
:type lock_bound: bool
:param inc: Increment. default +1, use -1 to get previous index
:type inc: int
:return: neighbouring index value
"""
index_value_iloc = df.index.get_loc(index_val)
next_iloc = index_value_iloc + inc
try:
next_index_value = df.index[next_iloc]
except IndexError:
if lock_bound:
return index_value_iloc
else:
next_index_value = None
return next_index_value
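# Illustration (not in the original module): step to a neighbouring index value.
def _example_get_next_index():
    s = pd.Series([10, 20, 30], index=[1, 3, 5])
    assert get_next_index(s, 3) == 5
    assert get_next_index(s, 3, inc=-1) == 1
    assert get_next_index(s, 5) is None   # ran off the end, lock_bound is False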
# ---------------------------------------------------------------------------
def value_counts_weighted(df, fields, weight_name='weight', count_name=None, ascending=None):
"""Replacement for pandas DataFrame.value_counts() summing the column given in `weight_name`.
To obtain raw counts, as provided by original value_counts, use a column with contant 1.
Args:
df - dataframe to perform value counts for
fields - fields whose values should be counted
weight_name - name of weight column
count_name - name for resulting field containing counts (default: 'count')
ascending - True/False for sorting order, None to keep original order (default)
Returns:
pandas Series of counts
"""
vc_df = df.groupby(fields)[weight_name].sum()
if ascending is None:
pass
elif isinstance(fields, str):
vc_df = vc_df.sort_values(ascending=ascending).rename()
if count_name is None:
count_name = fields
vc_df.index.name = None
else:
        # If multiple fields are used, vc_df is going to have a multi-index.
# Construct a multi-index that produces the sorted order from first to last field
# TODO: check this implementation or remove it
def gen_all():
for field in fields:
yield value_counts_weighted(df, field).sort_values(ascending=ascending).index
vc_df = vc_df.loc[tuple(list(gen_all()))]
if not count_name is None:
vc_df = vc_df.rename(count_name)
return vc_df
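# Illustration (not in the original module): weighted counts sum the weight
# column per value; a constant weight of 1 reproduces plain value_counts.
def _example_value_counts_weighted():
    frame = pd.DataFrame({'city': ['A', 'A', 'B'], 'weight': [0.5, 1.5, 2.0]})
    counts = value_counts_weighted(frame, 'city', weight_name='weight')
    assert counts['A'] == 2.0 and counts['B'] == 2.0
    return counts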
def make_weekday_df(start_date, end_date):
"""Make a pandas dataframe of weekday names for dates.
The 'date' column is a string formatted as, e.g. 2019/07/25,
the 'day' column is the full weekday name, lower case (sunday, monday, ...)
"""
delta = datetime.timedelta(days=1)
day = []
date = []
while start_date <= end_date:
day.append(start_date.strftime('%A').lower())
date.append(str(start_date).replace("-","/"))
start_date+= delta
df_weekday = pd.DataFrame()
df_weekday['date'] = date
df_weekday['day'] = day
df_weekday['is_weekend'] = df_weekday['day'].isin(['sunday','saturday'])
return df_weekday
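# Illustration (not in the original module): one calendar week in July 2019.
def _example_make_weekday_df():
    week = make_weekday_df(datetime.date(2019, 7, 22), datetime.date(2019, 7, 28))
    assert week['day'].iloc[0] == 'monday' and week['is_weekend'].sum() == 2
    return week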
def make_month_start_end_dates(month, year=2019):
start_date = datetime.date(year, int(month), 1)
    end_date = (start_date + pd.offsets.DateOffset(months=1, days=-1)).date()
    return start_date, end_date
import numpy as np
import pandas as pd
from scipy import stats
__all__ = ['bootci_pd',
'permtest_pd']
def bootci_pd(df, statfunction, alpha=0.05, n_samples=10000, method='bca'):
"""Estimate bootstrap CIs for a statfunction that operates along the rows of
a pandas.DataFrame and return a dict or pd.Series of results. Returning
a dict is typically faster.
This is about 10x slower than using scikits.bootstrap.ci for a statistic
doesn't require resampling the whole DataFrame. However, if the statistic
requires the whole DataFrame or you are computing many statistics on the
same DataFrame that all require CIs, then this function may be efficient.
Parameters
----------
df : pd.DataFrame
Data that will be passed to statfunction as a single parameter.
statfunction : function
Function that should operate along the rows of df and return a dict
alpha : float [0, 1]
Specify CI: [alpha/2, 1-alpha/2]
n_samples : int
Number of bootstrap samples.
method : str
Specify bias-corrected and accelerated ("bca") or percentile ("pi")
bootstrap.
Returns
-------
cis : pd.Series [est, lcl, ucl]
Point-estimate and CI of statfunction of df"""
alphas = np.array([alpha/2, 1-alpha/2])
# The value of the statistic function applied just to the actual data.
res = pd.Series(statfunction(df))
#st = time.time()
boot_res = []
for i in range(n_samples):
# rind = np.random.randint(df.shape[0], size=df.shape[0])
# boot_res.append(statfunction(df.iloc[rind]))
boot_res.append(statfunction(df.sample(frac=1, replace=True)))
    boot_res = pd.DataFrame(boot_res)
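# Usage sketch for bootci_pd based on its docstring (added for exposition; the
# data and the statfunction below are made up):
def _example_bootci_pd():
    rng = np.random.RandomState(0)
    frame = pd.DataFrame({'x': rng.normal(size=200)})
    cis = bootci_pd(frame, lambda d: {'mean_x': d['x'].mean()},
                    alpha=0.05, n_samples=500, method='pi')
    return cis   # point estimate and percentile CI for the mean of column x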
# FIT DATA TO A CURVE
# <NAME> - MIT Licence
# inspired by @dimgrr. Based on
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509?gi=9c7c4ade0880
# https://github.com/venkatesannaveen/python-science-tutorial/blob/master/curve-fitting/curve-fitting-tutorial.ipynb
# https://www.reddit.com/r/CoronavirusUS/comments/fqx8fn/ive_been_working_on_this_extrapolation_for_the/
# to explore : https://github.com/fcpenha/Gompertz-Makehan-Fit/blob/master/script.py
# Import required packages
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.dates as mdates
import copy, math
from lmfit import Model
import pandas as pd
import streamlit as st
import datetime as dt
from datetime import datetime, timedelta
import matplotlib.animation as animation
import imageio
import streamlit.components.v1 as components
import os
import platform
import webbrowser
from pandas import read_csv, Timestamp, Timedelta, date_range
from io import StringIO
from numpy import log, exp, sqrt, clip, argmax, put
from scipy.special import erfc, erf
from matplotlib.pyplot import subplots
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import ConciseDateFormatter, AutoDateLocator
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from PIL import Image
import glob
# Functions to calculate values a,b and c ##########################
def exponential(x, a, b, c):
    ''' Standard Gompertz function
    a = height, b = halfway point, c = growth rate
https://en.wikipedia.org/wiki/Gompertz_function '''
return a * np.exp(-b * np.exp(-c * x))
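# Quick numeric illustration (added for exposition): in the Gompertz curve the
# parameter a is the upper asymptote, so values approach a for large x.
def _example_gompertz_asymptote():
    a, b, c = 1000.0, 10.0, 0.1
    assert abs(exponential(200.0, a, b, c) - a) < 1e-3
    assert exponential(0.0, a, b, c) < a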
def derivate(x, a, b, c):
''' First derivate of the Gompertz function. Might contain an error'''
return (np.exp(b * (-1 * np.exp(-c * x)) - c * x) * a * b * c ) + BASEVALUE
#return a * b * c * np.exp(-b*np.exp(-c*x))*np.exp(-c*x)
def derivate_of_derivate(x,a,b,c):
return a*b*c*(b*c*exp(-c*x) - c)*exp(-b*exp(-c*x) - c*x)
def gaussian(x, a, b, c):
    ''' Standard Gaussian function. Doesn't give results; not in use. '''
return a * np.exp(-np.power(x - b, 2) / (2 * np.power(c, 2)))
def gaussian_2(x, a, b, c):
    ''' Another Gaussian function, in use.
    a = height, b = center, c = width '''
return a * np.exp(-((x - b) ** 2) / c)
def growth(x, a, b):
""" Growth model. a is the value at t=0. b is the so-called R number.
    Doesn't work yet. FIX IT """
return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b)))))
# https://replit.com/@jsalsman/COVID19USlognormals
def lognormal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * erfc(- (log(x) - mu) / (s * sqrt(2)))
# https://en.wikipedia.org/wiki/Log-normal_distribution#Cumulative_distribution_function
def normal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * (1 + erf((x - mu) / (s * sqrt(2))))
# #####################################################################
def find_gaussian_curvefit(x_values, y_values):
try:
popt_g2, pcov_g2 = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[0, 0, 0],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
except RuntimeError as e:
str_e = str(e)
st.error(f"gaussian fit :\n{str_e}")
return tuple(popt_g2)
def use_curvefit(x_values, x_values_extra, y_values, title, daterange,i):
"""
Use the curve-fit from scipy.
IN : x- and y-values. The ___-extra are for "predicting" the curve
"""
with _lock:
st.subheader(f"Curvefit (scipy) - {title}")
fig1x = plt.figure()
try:
a_start, b_start, c_start = 0,0,0
popt, pcov = curve_fit(
f=exponential,
xdata=x_values,
ydata=y_values,
#p0=[4600, 11, 0.5],
                p0 = [a_start, b_start, c_start ], # ICU beds March-April
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
exponential(x_values_extra, *popt),
"r-",
label="exponential fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Exponential fit :\n{str_e}")
try:
popt_d, pcov_d = curve_fit(
f=derivate,
xdata=x_values,
ydata=y_values,
#p0=[0, 0, 0],
                p0 = [a_start, b_start, c_start ], # ICU beds March-April
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
derivate(x_values_extra, *popt_d),
"g-",
label="derivate fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_d),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Derivate fit :\n{str_e}")
# FIXIT
# try:
# popt_growth, pcov_growth = curve_fit(
# f=growth,
# xdata=x_values,
# ydata=y_values,
# p0=[500, 0.0001],
# bounds=(-np.inf, np.inf),
# maxfev=10000,
# )
# plt.plot(
# x_values_extra,
# growth(x_values_extra, *popt_growth),
# "y-",
# label="growth: a=%5.3f, b=%5.3f" % tuple(popt_growth),
# )
# except:
# st.write("Error with growth model fit")
try:
popt_g, pcov_g = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[a_start, b_start, c_start ],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
gaussian_2(x_values_extra, *popt_g),
"b-",
label="gaussian fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_g),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Gaussian fit :\n{str_e}")
plt.scatter(x_values, y_values, s=20, color="#00b3b3", label="Data")
plt.legend()
plt.title(f"{title} / curve_fit (scipy)")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
        # ATTEMPT TO GET DATES ON THE X-AXIS (TO FIX)
# plt.xlim(daterange[0], daterange[-1])
# lay-out of the x axis
# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
# interval_ = 5
# plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=interval_))
# plt.gcf().autofmt_xdate()
#plt.show()
filename= (f"{OUTPUT_DIR}scipi_{title}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1x)
# def make_gif(filelist):
# # Create the frames
# frames = []
# imgs = glob.glob("*.png")
# for i in imgs:
# new_frame = Image.open(i)
# frames.append(new_frame)
#
# # Save into a GIF file that loops forever
# frames[0].save('png_to_gif.gif', format='GIF',
# append_images=frames[1:],
# save_all=True,
# duration=300, loop=0)
def use_lmfit(x_values, y_values, functionlist, title,i, max_y_values):
"""
Use lmfit.
IN : x- and y-values.
functionlist (which functions to use)
adapted from https://stackoverflow.com/a/49843706/4173718
TODO: Make all graphs in one graph
"""
a_start, b_start, c_start = 0,0,0
for function in functionlist:
#placeholder0.subheader(f"LMFIT - {title} - {function}")
# create a Model from the model function
if function == "exponential":
bmodel = Model(exponential)
formula = "a * np.exp(-b * np.exp(-c * x))"
elif function == "derivate":
bmodel = Model(derivate)
formula = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "gaussian":
bmodel = Model(gaussian_2)
formula = "a * np.exp(-((x - b) ** 2) / c)"
else:
st.write("Please choose a function")
st.stop()
# create Parameters, giving initial values
#params = bmodel.make_params(a=4711, b=12, c=0.06)
        params = bmodel.make_params(a=a_start, b=b_start, c=c_start) # ICU beds March-April
# params = bmodel.make_params()
params["a"].min = a_start
params["b"].min = b_start
params["c"].min = c_start
# do fit, st.write result
result = bmodel.fit(y_values, params, x=x_values)
a = round(result.params['a'].value,5)
b= round(result.params['b'].value,5)
c =round(result.params['c'].value,5)
placeholder1.text(result.fit_report())
with _lock:
#fig1y = plt.figure()
fig1y, ax1 = plt.subplots()
ax2 = ax1.twinx()
# plot results -- note that `best_fit` is already available
ax1.scatter(x_values, y_values, color="#00b3b3", s=2)
#ax1.plot(x_values, result.best_fit, "g")
res = (f"a: {a} / b: {b} / c: {c}")
plt.title(f"{title} / lmfit - {function}\n{formula}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
ax1.plot(t, bmodel.eval(result.params, x=t), "r-")
ax2.plot (t, derivate_of_derivate(t,a,b,c), color = 'purple')
ax2.axhline(linewidth=1, color='purple', alpha=0.5, linestyle="--")
#ax1.plot (t, derivate(t,26660.1, 9.01298, 0.032198), color = 'purple')
#ax2.plot (t, derivate_of_derivate(t,26660.1, 9.01298, 0.032198), color = 'yellow')
#plt.ylim(bottom=0)
#ax1.ylim(0, max_y_values*1.1)
#ax1.set_ylim(510,1200)
#ax2.set_ylim(0,12)
ax1.set_xlabel(f"Days from {from_}")
ax1.set_ylabel(f"{title} - red")
ax2.set_ylabel("delta - purple")
#plt.show()
filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
placeholder.pyplot(fig1y)
if prepare_for_animation == False:
with _lock:
fig1z = plt.figure()
# plot results -- note that `best_fit` is already available
if function == "exponential":
plt.plot(t, derivate(t,a,b,c))
function_x = "derivate"
formula_x = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "derivate":
plt.plot(t, exponential(t, a,b,c))
function_x = "exponential"
formula_x = "a * np.exp(-b * np.exp(-c * x))"
else:
st.error("ERROR")
st.stop()
plt.title(f"{title} / {function_x}\n{formula_x}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
#plt.plot(t, bmodel.eval(result.params, x=t), "r-")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
plt.ylabel(title)
#plt.show()
#filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
#plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1z)
return filename
def fit_the_values_really(x_values, y_values, which_method, title, daterange,i, max_y_values):
x_values_extra = np.linspace(
start=0, stop=TOTAL_DAYS_IN_GRAPH - 1, num=TOTAL_DAYS_IN_GRAPH
)
x_values = x_values[:i]
y_values = y_values[:i]
if prepare_for_animation == False:
use_curvefit(x_values, x_values_extra, y_values, title, daterange,i)
return use_lmfit(x_values,y_values, [which_method], title,i, max_y_values)
def fit_the_values(to_do_list , total_days, daterange, which_method, prepare_for_animation):
"""
We are going to fit the values
"""
# Here we go !
st.header("Fitting data to formulas")
infox = (
'<br>Exponential / Standard gompertz function : <i>a * exp(-b * np.exp(-c * x))</i></li>'
'<br>First derivate of the Gompertz function : <i>a * b * c * exp(b * (-1 * exp(-c * x)) - c * x)</i></li>'
'<br>Gaussian : <i>a * exp(-((x - b) ** 2) / c)</i></li>'
'<br>Working on growth model: <i>(a * 0.5 ^ (x / (4 * (math.log(0.5) / math.log(b)))))</i> (b will be the Rt-number)</li>'
)
st.markdown(infox, unsafe_allow_html=True)
global placeholder0, placeholder, placeholder1
placeholder0 = st.empty()
placeholder = st.empty()
placeholder1 = st.empty()
el = st.empty()
for v in to_do_list:
title = v[0]
y_values = v[1]
max_y_values = max(y_values)
# some preperations
number_of_y_values = len(y_values)
global TOTAL_DAYS_IN_GRAPH
TOTAL_DAYS_IN_GRAPH = total_days # number of total days
x_values = np.linspace(start=0, stop=number_of_y_values - 1, num=number_of_y_values)
if prepare_for_animation == True:
filenames = []
for i in range(5, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
filenames.append(filename)
# build gif
with imageio.get_writer('mygif.gif', mode='I') as writer:
for filename_ in filenames:
image = imageio.imread(f"{filename_}.png")
writer.append_data(image)
webbrowser.open('mygif.gif')
# Remove files
for filename__ in set(filenames):
os.remove(f"{filename__}.png")
else:
for i in range(len(x_values)-1, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
# FIXIT
# aq, bq, cq = find_gaussian_curvefit(x_values, y_values)
# st.write(f"Find Gaussian curvefit - a:{aq} b:{bq} c: {cq}")
def select_period(df, show_from, show_until):
""" _ _ _ """
if show_from is None:
show_from = "2020-2-27"
if show_until is None:
show_until = "2020-4-1"
mask = (df[DATEFIELD].dt.date >= show_from) & (df[DATEFIELD].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
def normal_c(df):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Normal_c")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
            // Timedelta('1d'))
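# A minimal sketch of the `exponential` and `derivate` model functions referenced
# above, assuming the formulas quoted in the plot titles (standard Gompertz curve
# and its first derivative):
import numpy as np

def exponential(x, a, b, c):
    # standard Gompertz function: a * exp(-b * exp(-c * x))
    return a * np.exp(-b * np.exp(-c * x))

def derivate(x, a, b, c):
    # first derivative of the Gompertz function
    return a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)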
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from collections import OrderedDict
import gc
from current_clamp import *
from current_clamp_features import extract_istep_features
from visualization.feature_annotations import feature_name_dict
from read_metadata import *
from file_io import load_current_step
# from pymysql import IntegrityError
import datajoint as dj
schema = dj.schema('yueqi_ephys', locals())
FIG_DIR = 'analysis_current_clamp/figures_plot_recording'
'''
class DjImportedFromDirectory(dj.Imported):
# Subclass of Imported. Initialize with data directory.
def __init__(self, directory=''):
self.directory = directory
super().__init__()
'''
@schema
class EphysExperimentsForAnalysis(dj.Manual):
definition = """
# Ephys experiments (excel files) for analysis
experiment: varchar(128) # excel files to use for analysis
---
project: varchar(128) # which project the data belongs to
use: enum('Yes', 'No') # whether to use this experiment
directory: varchar(256) # the parent project directory
"""
def insert_experiment(self, excel_file):
'''
Insert new sample ephys metadata from excel to datajoint tables
'''
entry_list = pd.read_excel(excel_file)[['experiment', 'project', 'use', 'directory']].dropna(how='any')
entry_list = entry_list.to_dict('records')
no_insert = True
for entry in entry_list:
if entry['use'] == 'No':
continue
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
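# A minimal usage sketch, assuming an Excel metadata file at the hypothetical
# path 'ephys_experiments.xlsx'; populate() is the standard DataJoint call that
# runs _make_tuples for every experiment not yet present in the downstream tables.
def run_ephys_ingest(excel_file='ephys_experiments.xlsx'):
    EphysExperimentsForAnalysis().insert_experiment(excel_file)
    Animals().populate()
    PatchCells().populate()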
@schema
class Animals(dj.Imported):
definition = """
# Sample metadata
-> EphysExperimentsForAnalysis
---
    id: varchar(128) # organoid ID (use date, but need better naming)
strain : varchar(128) # genetic strain
dob = null: date # date of birth
date = null: date # recording date
    age = null: smallint # number of days (date - dob)
slicetype: varchar(128) # what kind of slice prep
external: varchar(128) # external solution
internal: varchar(128) # internal solution
animal_comment = '': varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
animal_info, _ = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
key['id'] = animal_info['id']
key['strain'] = animal_info['strain']
if not pd.isnull(animal_info['DOB']): key['dob'] = animal_info['DOB']
if not pd.isnull(animal_info['age']): key['age'] = animal_info['age']
key['date'] = animal_info['date']
key['slicetype'] = animal_info['type']
key['external'] = animal_info['external']
key['internal'] = animal_info['internal']
if not pd.isnull(animal_info['comment']): key['animal_comment'] = animal_info['comment']
self.insert1(row=key)
return
@schema
class PatchCells(dj.Imported):
definition = """
# Patch clamp metadata for each cell
-> EphysExperimentsForAnalysis
cell: varchar(128) # cell id
---
rp = null: float # pipette resistance
cm_est = null: float # estimated Cm
ra_est = null: float # estimated Ra right after whole-cell mode
rm_est = null: float # estimated Rm
v_rest = null: float # resting membrane potential
fluor = '': varchar(128) # fluorescent label
    fill = 'no': enum('yes', 'no', 'unknown', 'out') # whether the cell is biocytin filled. Out -- cell came out with pipette.
cell_external = '': varchar(128) # external if different from sample metadata
cell_internal = '': varchar(128) # internal if different from sample metadata
depth = '': varchar(128) # microns beneath slice surface
location = '': varchar(128) # spatial location
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
_, metadata = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
if 'params' in metadata.columns:
old_file = True
cell_info = parse_cell_info_2017_vertical(metadata)
else:
old_file = False
cell_info = parse_cell_info_2017(metadata)
for i, row in cell_info.iterrows():
newkey = {}
newkey['experiment'] = key['experiment']
newkey['cell'] = row['cell']
            if not pd.isnull(row['Rp']): newkey['rp'] = row['Rp']
import streamlit as st
from streamlit_folium import folium_static
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import geopandas as gpd
import folium
from folium import Choropleth, Circle, Marker
from folium.plugins import HeatMap, MarkerCluster
import plotly.express as px
# import the data
exp = pd.read_csv('exportaciones.csv')
import pandas as pd
import streamlit as st
from optimizer import Optimizer
from database import PersonSettings, TaskSettings, Database
def css_color_white_for_zero(float_val: float) -> str:
color = "white" if float_val == 0 else "black"
return f"color: {color}"
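# A small illustrative sketch (the demo frame below is hypothetical): applied
# through a pandas Styler, zero-valued cells are rendered in white so they
# disappear against a white background.
_demo_df = pd.DataFrame({"task A": [0.0, 1.5], "task B": [2.0, 0.0]})
_demo_styled = _demo_df.style.applymap(css_color_white_for_zero)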
def md_heading(
content: str, level: int, color: str = "#f63366", is_sidebar=False):
md = f"<h{level} style='text-align: center; color: {color};'>" \
f"{content}</h{level}>"
if is_sidebar:
st.sidebar.markdown(md, unsafe_allow_html=True)
else:
st.markdown(md, unsafe_allow_html=True)
def main():
db = Database()
APP_TITLE = "Auto Assignment"
st.set_page_config(
page_title=APP_TITLE,
layout="wide",
initial_sidebar_state="auto")
md_heading(APP_TITLE, 1)
    st.info(
        ":information_source: "
        "A technical explanation of this app is provided on Macnica's blog, "
        "which is written in Japanese:"
" [mathematical optimization]"
"(https://mnb.macnica.co.jp/2021/11/Python-Staffing01.html), "
"[application prototyping]"
"(https://mnb.macnica.co.jp/2022/02/ai/Python-Staffing02.html)")
st.write(
"Developed by [***<NAME>***](https://twiitter.com/kazukiigeta)")
st.write("")
    st.write("This app provides automatic assignment of tasks "
             "based on mathematical optimization. The following "
             "two conditions can be overridden by uploading CSV files "
             "from the widgets on the left sidebar, "
             "and the other conditions are also changeable there.")
st.write("- Person setting: names of people and their available time")
st.write("- Task setting: names of tasks and their requiring time")
md_heading(content="Settings", level=1, is_sidebar=True)
st.sidebar.write("### Assignment settings")
# Read from DB
df_person_settings = db.read_table_to_df(PersonSettings)
df_task_settings = db.read_table_to_df(TaskSettings)
optimizer = Optimizer(df_person_settings=df_person_settings,
df_task_settings=df_task_settings)
st.sidebar.write("")
sr_full_name = optimizer.person_full_name()
for full_name in sr_full_name:
with st.sidebar.expander(full_name):
tasks = st.multiselect(
label="Fixed assignment",
options=optimizer.df_task_settings["task_name"],
key=f"fix-{full_name}")
if len(tasks) > 0:
sr_n_person = optimizer.sr_n_person()
for task in tasks:
task_number = optimizer.task_number(task)
if sr_n_person[task_number] != 0:
max_value = (
float(
optimizer.df_task_settings
.query(f"task_name == '{task}'")
.loc[:, "hour"]
)
)
hour_setting = st.number_input(
label=f"{task} "
f"man hour(0.00 means equally distribution)",
min_value=0.0,
max_value=max_value,
step=0.01,
key=f"{full_name}-{task}"
)
optimizer.fix_condition(person_full_name=full_name,
task_name=task,
hour=hour_setting)
st.sidebar.write("")
# Upload person settings CSV
uploaded_person_settings = st.sidebar.file_uploader(
"Upload person setting CSV", type="csv")
if uploaded_person_settings is not None:
        df_person_for_assertion = pd.read_csv(uploaded_person_settings)
import os
import mlflow
import pandas as pd
import torch
def load_model(prev_runid, model, device):
try:
run = mlflow.get_run(prev_runid)
except:
return model
model_dir = run.info.artifact_uri + "/model/data/model.pth"
if model_dir[:7] == "file://":
model_dir = model_dir[7:]
if os.path.isfile(model_dir):
model_loaded = torch.load(model_dir, map_location=device)
model.load_state_dict(model_loaded.state_dict())
print("Model restored from " + prev_runid + "\n")
else:
        print("No model found at " + prev_runid + "\n")
return model
def create_model_dir(path_results, runid):
path_results += runid + "/"
if not os.path.exists(path_results):
os.makedirs(path_results)
print("Results stored at " + path_results + "\n")
return path_results
def save_model(model):
mlflow.pytorch.log_model(model, "model")
def save_csv(data, fname):
# create file if not there
path = mlflow.get_artifact_uri(artifact_path=fname)
if path[:7] == "file://": # to_csv() doesn't work with 'file://'
path = path[7:]
if not os.path.isfile(path):
mlflow.log_text("", fname)
    pd.DataFrame(data).to_csv(path, index=False)
import pandas as pd
import sys, os
print('''
batch_name = sys.argv[1]
# e.g., _n5_b1.0_h10_epoch5_dataarmman_
file_token = sys.argv[2]
seed_ub = int(sys.argv[3])
''')
# python3 combine_batch_data.py armman_v1 _n5_b1.0_h10_epoch5_dataarmman_ 49
# e.g., armman_v1
batch_name = sys.argv[1]
# e.g., _n5_b1.0_h10_epoch5_dataarmman_
file_token = sys.argv[2]
seed_lb = 0
seed_ub = int(sys.argv[3])
if len(sys.argv) > 4:
seed_lb = int(sys.argv[3])
seed_ub = int(sys.argv[4])
dir_prefix = os.path.join('batches', batch_name)
file_suffix = file_token + 's%i.csv'
merged_file_suffix = file_token+'merged.csv'
### Merge equilibriums
eq_dir_prefix = os.path.join(dir_prefix,'equilibriums')
## nature
# before
fname_prefix = 'nature_eq_before'
nature_eq_fname = fname_prefix + file_suffix
dfs = []
files_not_found = {fname_prefix:[]}
for s in range(seed_lb, seed_ub):
fname = os.path.join(eq_dir_prefix, nature_eq_fname)
fname = fname % s
try:
df = pd.read_csv(fname)
dfs.append(df)
except FileNotFoundError:
        print("couldn't find", fname)
files_not_found[fname_prefix].append(s)
merged_nature_eq_fname = fname_prefix + merged_file_suffix
merged_nature_eq_fname = os.path.join(eq_dir_prefix, merged_nature_eq_fname)
nature_eq_dfs = pd.concat(dfs)
print(merged_nature_eq_fname)
nature_eq_dfs.to_csv(merged_nature_eq_fname, index=False)
# after
fname_prefix = 'nature_eq_after'
nature_eq_fname = fname_prefix + file_suffix
dfs = []
files_not_found = {fname_prefix:[]}
for s in range(seed_lb, seed_ub):
fname = os.path.join(eq_dir_prefix, nature_eq_fname)
fname = fname % s
try:
        df = pd.read_csv(fname)
import time
import pandas as pd
from AT.global_objects import server_to_sEclss_queue, server_to_hera_queue
def get_param_values(sensor_data):
new_row = {}
tf_info_dict = {'parameter': ['display_name',
'kg_name',
'group',
'units',
'low_warning_threshold',
'low_caution_threshold',
'nominal',
'high_caution_threshold',
'high_warning_threshold', ]}
for item in sensor_data:
name = item['Name']
nominal = item['NominalValue']
simulated_value = item['SimValue']
units = item['Unit']
group = item['ParameterGroup']
lct = item['LowerWarningLimit']
lwt = item['LowerCautionLimit']
hwt = item['UpperCautionLimit']
hct = item['UpperWarningLimit']
display_name = name + ' (' + group + ')'
kg_name = name
tf_info_dict[display_name] = [display_name, kg_name, group, units, lct, lwt, nominal, hwt, hct]
new_row[display_name] = simulated_value
tf_info = pd.DataFrame(tf_info_dict)
tf_info = tf_info.set_index('parameter')
parsed_sensor_data = {'new_values': new_row, 'info': tf_info}
return parsed_sensor_data
def get_hss_param_values(sensor_data):
new_row = {}
tf_info_dict = {'parameter': ['display_name',
'kg_name',
'group',
'units',
'low_warning_threshold',
'low_caution_threshold',
'nominal',
'high_caution_threshold',
'high_warning_threshold', ]}
for item in sensor_data:
name = item['Name']
nominal = item['NominalValue']
simulated_value = item['SimValue']
units = item['Unit']
group = item['ParameterGroup']
lct = item['LowerWarningLimit']
lwt = item['LowerCautionLimit']
hwt = item['UpperCautionLimit']
hct = item['UpperWarningLimit']
display_name = name + ' (' + group + ')'
kg_name = name
tf_info_dict[display_name] = [display_name, kg_name, group, units, lct, lwt, nominal, hwt, hct]
new_row[display_name] = simulated_value
    tf_info = pd.DataFrame(tf_info_dict)
import pandas as pd
import os
import logging
import yfinance as yf
import time
import numpy as np
import mysql.connector
import logging
import sys
from datetime import datetime
log_filename = 'log_stocks_' + time.strftime("%Y-%m-%d %H;%M;%S", time.gmtime()) + '_run' + '.log'
if sys.platform == 'darwin':
log_filepath = os.path.join("/Users/ondrejkral/GitHub/stocks_games" + "/stocks_logs/" + log_filename)
else:
log_filepath = os.path.join("/home/pi/Documents/GitHub/stocks_games" + "/stocks_logs/" + log_filename)
logging.basicConfig(filename=log_filepath, level=logging.DEBUG, format='%(asctime)s:%(lineno)d:%(message)s')
start = time.time()
# Load stock tickers
if sys.platform == 'darwin':
tickers_df = pd.read_csv('/Users/ondrejkral/GitHub/stocks_games/stock_list.csv', encoding="ISO-8859-1")
from python_dev.functions import getConfigFile
else:
tickers_df = pd.read_csv('/home/pi/Documents/GitHub/stocks_games/stock_list.csv', encoding="ISO-8859-1")
from functions import getConfigFile
tickers_df_index = tickers_df.set_index('Ticker')
config_conn = getConfigFile()
conn = mysql.connector.connect(
host=config_conn.sql_hostname.iloc[0],
user=config_conn.sql_username.iloc[0],
passwd=config_conn.sql_password.iloc[0],
database=config_conn.sql_main_database.iloc[0],
port=config_conn.sql_port[0]
)
query = '''SELECT * FROM existing_stocks;'''
stocks_list = pd.read_sql_query(query, conn)
# coding:utf8
"""
Description: handling missing values in a dataset
Author:伏草惟存
Prompt: code in Python3 env
"""
# Handling missing values in data. Common strategies:
# - fill missing values with the mean of the available feature
# - fill missing values with a special value such as -1 or 0
# - ignore samples that contain missing values
# - fill missing values with the mean of similar samples
# - predict missing values with a machine-learning algorithm
import numpy
from numpy import *
'''Load the dataset'''
def loadDataSet(fileName, delim='\t'):
fr = open(fileName)
stringArr = [line.strip().split(delim) for line in fr.readlines()]
    # print(stringArr)  # 2-D list of strings
datArr = [list(map(float, line)) for line in stringArr]
# print(mat(datArr))
return mat(datArr)
'''Replace NaN entries with the column mean'''
def replaceNanWithMean():
datMat = loadDataSet('../dataSet/files/dataset.data',' ')
numFeat = shape(datMat)
    # print(numFeat[1]-1)  # number of features: 3
for i in range(numFeat[1]-1):
        # take the mean of the non-NaN values; .A returns the ndarray behind the matrix
        # print(datMat[nonzero(~isnan(datMat[:, i].A))[0],i])  # non-NaN entries of this feature column
meanVal = mean(datMat[nonzero(~isnan(datMat[:, i].A))[0], i])
        # assign the mean to entries whose value is NaN
datMat[nonzero(isnan(datMat[:, i].A))[0],i] = meanVal
return datMat
# if __name__=='__main__':
# Load the dataset
# loadDataSet('../dataSet/files/dataset.data',' ')
# Fill missing values with the mean
# datMat = replaceNanWithMean()
# print(datMat)
# ************ 3. Handling missing values with Pandas ************
# In fields such as machine learning and data mining, poor data quality caused
# by missing values is a serious problem for the accuracy of model predictions.
import pandas as pd
import numpy as np
datMat = loadDataSet('../dataSet/files/dataset.data',' ')
df = pd.DataFrame(datMat)
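# A brief sketch of the pandas-based handling (standard pandas API; the exact
# strategy is up to the reader):
print(df.isnull().sum())           # count missing values per column
df_filled = df.fillna(df.mean())   # fill NaN with the column mean
df_dropped = df.dropna()           # or drop rows that contain NaN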
import os, pickle, glob
from pandas.core.reshape.concat import concat
from common.tflogs2pandas import tflog2pandas
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from common.gym_interface import template
if False:
def read_df(body):
dfs = []
for seed in [0,1,2]:
folder = f"output_data/tensorboard_oracle/model-{body}-caseWalker2DHopperWrapper-sd{seed}/PPO_1"
print(f"Loading {folder} ...")
df = tflog2pandas(folder)
df = df[df["metric"]==f"eval/{body}_mean_reward"]
max_value = df["value"].max()
final_value = df.iloc[-1, df.columns.get_loc("value")]
df = pd.DataFrame({
"body": template(body),
"body_id": body,
"max_value": max_value,
"final_value": final_value,
"seed": seed,
}, index=[body])
dfs.append(df)
        return pd.concat(dfs)
import tempfile
import time
from datetime import datetime, timedelta
from typing import List
from urllib.parse import urlparse
import fastavro
import pandas as pd
from fastavro import reader as fastavro_reader
from google.cloud import storage
from feast.serving.ServingService_pb2 import GetJobRequest
from feast.serving.ServingService_pb2 import (
Job as JobProto,
JOB_STATUS_DONE,
DATA_FORMAT_AVRO,
)
from feast.serving.ServingService_pb2_grpc import ServingServiceStub
# Maximum no of seconds to wait until the job's status is DONE in Feast
# Currently set to the maximum query execution time limit in BigQuery
DEFAULT_TIMEOUT_SEC: int = 21600
# Maximum no of seconds to wait before reloading the job status in Feast
MAX_WAIT_INTERVAL_SEC: int = 60
class Job:
"""
A class representing a job for feature retrieval in Feast.
"""
def __init__(self, job_proto: JobProto, serving_stub: ServingServiceStub):
"""
Args:
job_proto: Job proto object (wrapped by this job object)
serving_stub: Stub for Feast serving service
storage_client: Google Cloud Storage client
"""
self.job_proto = job_proto
self.serving_stub = serving_stub
self.storage_client = storage.Client(project=None)
@property
def id(self):
"""
Getter for the Job Id
"""
return self.job_proto.id
@property
def status(self):
"""
Getter for the Job status from Feast Core
"""
return self.job_proto.status
def reload(self):
"""
Reload the latest job status
Returns: None
"""
self.job_proto = self.serving_stub.GetJob(GetJobRequest(job=self.job_proto)).job
def result(self, timeout_sec: int = DEFAULT_TIMEOUT_SEC):
"""
        Wait until the job is done, then return an iterable of result rows.
        In Feast 0.3 each row can only be an Avro record.
Args:
timeout_sec: max no of seconds to wait until job is done. If "timeout_sec" is exceeded, an exception will be raised.
Returns: Iterable of Avro rows
"""
max_wait_datetime = datetime.now() + timedelta(seconds=timeout_sec)
wait_duration_sec = 2
while self.status != JOB_STATUS_DONE:
if datetime.now() > max_wait_datetime:
raise Exception(
"Timeout exceeded while waiting for result. Please retry this method or use a longer timeout value."
)
self.reload()
time.sleep(wait_duration_sec)
# Backoff the wait duration exponentially up till MAX_WAIT_INTERVAL_SEC
wait_duration_sec = min(wait_duration_sec * 2, MAX_WAIT_INTERVAL_SEC)
if self.job_proto.error:
raise Exception(self.job_proto.error)
if self.job_proto.data_format != DATA_FORMAT_AVRO:
raise Exception(
"Feast only supports Avro data format for now. Please check "
"your Feast Serving deployment."
)
uris = [urlparse(uri) for uri in self.job_proto.file_uris]
for file_uri in uris:
if file_uri.scheme == "gs":
file_obj = tempfile.TemporaryFile()
self.storage_client.download_blob_to_file(file_uri.geturl(), file_obj)
elif file_uri.scheme == "file":
file_obj = open(file_uri.path, "rb")
else:
raise Exception(
f"Could not identify file URI {file_uri}. Only gs:// and file:// supported"
)
file_obj.seek(0)
avro_reader = fastavro.reader(file_obj)
for record in avro_reader:
yield record
def to_dataframe(self, timeout_sec: int = DEFAULT_TIMEOUT_SEC):
"""
        Wait until the job is done, then return the result rows as a pandas DataFrame.
Args:
timeout_sec: max no of seconds to wait until job is done. If "timeout_sec" is exceeded, an exception will be raised.
Returns: pandas Dataframe of the feature values
"""
records = [r for r in self.result(timeout_sec=timeout_sec)]
        return pd.DataFrame.from_records(records)
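# A hypothetical usage sketch (the `job` instance would normally come from a
# Feast 0.3 batch feature retrieval call; the helper name is illustrative):
def print_job_rows(job: Job, timeout_sec: int = 600) -> None:
    # blocks until the job status is DONE, then iterates the Avro records
    for record in job.result(timeout_sec=timeout_sec):
        print(record)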
# we need to change this so that the constructor path is what tells the engine
# where it needs to go to get the constructor for the sim configs
from apparatus.simple_constructor import sim_configs_constructor
from apparatus.evaluator import evaluate_sim
from simulation import Simulation
# import runpy
import pandas as pd
class Experiment:
def __init__(self, exp_config = {"Name": "Name This Experiment", "Description":"Explain the purpose of this Experiment" ,"Constructor":"path/to/constructor", 'Trials':1 }):
self.config = exp_config
# use the iterator to append unique simulation configs to sim_configs
# self.sim_configs = [] #sim_config objects
self.simcount = 0
self.configcount = 0
self.simulations = []
# use the generate to append unique Simulation objects to simulations
# for sim_config in self.sim_configs:
# simId = self.size
# sim = Simulation(sim_config=sim_config)
# self.simulations.append({"Simulation Id": simId, "Simulation":sim, "Complete":False })
# self.size +=1
def set_config(self, exp_config):
self.config = exp_config
def construct_sim_configs(self):
# Placeholder/Reminder that we'll need
# to get the "dynamics" characterized by a
# runstep function or equivalent
# built from our PSUBs
#runpy.run_path(self.config["Constructor"])
sim_configs = sim_configs_constructor()
#running the iterator script must result in a list of
#valid sim_configs
return sim_configs
def generate(self):
sim_configs = self.construct_sim_configs()
monte_carlo_number = self.config['Trials']
for sim_config in sim_configs:
for n in range(monte_carlo_number):
#simId = self.simcount
#print(simId)
sim = Simulation(sim_config=sim_config)
self.simulations.append({"Simulation Id": self.simcount, "Config Id":self.configcount, "Simulation":sim, "Trial":n,"Complete":False })
self.simcount +=1
self.configcount +=1
def execute(self):
for simRecord in self.simulations:
if not(simRecord["Complete"]):
sim = simRecord["Simulation"]
sim.run()
simRecord["Complete"] = True
def evaluate(self):
for simRecord in self.simulations:
if simRecord["Complete"]:
sim = simRecord['Simulation']
evals = evaluate_sim(sim)
print("evals")
print(evals)
print("")
simRecord["Evaluations"] = {}
for k in evals.keys():
print(k)
simRecord["Evaluations"][k] = evals[k]
def get_records(self, as_df=True):
#Export!
records = []
for simRecord in self.simulations:
if simRecord["Complete"]:
records.append(simRecord)
if as_df:
            return pd.DataFrame(records)
import datetime
import numpy as np
import pandas as pd
pd.set_option('display.unicode.east_asian_width', True)
from flask import Flask, render_template, url_for, request
import pandas as pd
import numpy as np
from collections import Counter
import re
api_key='ef83cac599a8b6b59be433cffe4aa715'
url='https://api.themoviedb.org/3/search/movie?api_key=' +api_key+ '&query='
poster_url='https://image.tmdb.org/t/p/original'
import pandas as pd
import numpy as np
import json
import requests
app = Flask(__name__)
@app.after_request
def add_header(response):
response.cache_control.max_age = 0
return response
@app.route('/')
def index():
df=pd.read_csv("temp1.csv");
movies = getmovies()
actorname=get_actornames(df)
directorname=get_directornames(df)
productionname=get_productionnames(df)
return render_template("index.html", movies=movies, actors=actorname, prod=productionname, director=directorname)
def getmovies():
df = pd.read_csv("temp1.csv")
a = df['Title']
return a.values.tolist()
@app.route('/about')
def about_us():
return render_template('about.html')
@app.route('/licence')
def licence():
return render_template('licence.html')
@app.route('/predict', methods=['POST'])
def predict():
m = ''
m = request.form['movie_name']
m = m.split("~")
if m[0] == 'movie':
m=m[1]
        m=m.replace('&#39;',"'")
res=compute_cosine(m)
k= get_data(list(res['Title']))
data,er=get_data(m)
cst=cast_dat(data['id'])
direc1=direc(data['id'])
a=get_comm(data['id'])
gen=get_genres(data['id'])
data=format_dat(er,data,k,cst,a,direc1,gen)
movies=getmovies()
df=pd.read_csv("temp1.csv")
actorname=get_actornames(df)
directorname=get_directornames(df)
productionname=get_productionnames(df)
return render_template("result.html" ,data=data ,l=list(res['Title']),movies=movies,actors=actorname,prod=productionname,director=directorname)
if m[0]== 'actor':
m=m[1]
x=getactor_details(m)
movies=getmovies()
recom = get_reco_by_actor(m)
k= get_data(recom)
x['reco_movie'] = recom
rec=[]
w=0
for i in k:
a= get_poster(recom[w],k[i]['data']['backdrop_path'])
rec.append(a)
w+=1
x['profile']=rec
df=pd.read_csv("temp1.csv")
actorname=get_actornames(df)
directorname=get_directornames(df)
productionname=get_productionnames(df)
return render_template("actor.html",data=x,movies=movies,actors=actorname,prod=productionname,director=directorname)
if m[0]== 'prod':
m=m[1]
x=get_prod(m)
movies=getmovies()
recom = get_reco_by_company(m)
k= get_data(recom)
x['reco_movie'] = recom
rec=[]
w=0
for i in k:
a= get_poster(recom[w],k[i]['data']['backdrop_path'])
rec.append(a)
w+=1
x['profile']=rec
df=pd.read_csv("temp1.csv")
actorname=get_actornames(df)
directorname=get_directornames(df)
productionname=get_productionnames(df)
return render_template("prod.html",data=x,movies=movies,actors=actorname,prod=productionname,director=directorname)
if m[0]=='dir':
m=m[1]
x=get_director(m)
movies=getmovies()
recom = get_reco_by_director(m)
k= get_data(recom)
x['reco_movie'] = recom
rec=[]
w=0
for i in k:
a= get_poster(recom[w],k[i]['data']['backdrop_path'])
rec.append(a)
w+=1
x['profile']=rec
df=pd.read_csv("temp1.csv")
movies=getmovies()
actorname=get_actornames(df)
directorname=get_directornames(df)
productionname=get_productionnames(df)
return render_template("director.html",data=x,movies=movies,actors=actorname,prod=productionname,director=directorname)
def compute_cosine(a):
import pandas as pd
df=pd.read_csv("temp1.csv")
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
c=CountVectorizer()
i= pd.Series(df.index, index=df['Title'])
ti=i[a]
cm1=c.fit_transform(df['Attributes'])
cosmi=cosine_similarity(cm1)
cosmi=cosmi[ti]
cosmi=list(enumerate(cosmi))
cosmi=sorted(cosmi,key=(lambda i: i[1]),reverse=True)
cosmi=cosmi[1:]
mvs = [df['Title'][i[0]] for i in cosmi]
cosmi=pd.DataFrame(np.column_stack((cosmi,mvs)),columns=['Based Index','Score','Title'])
return cosmi[1:11]
def get_reco_by_actor(s):
df = pd.read_csv('temp2.csv')
t=0
df3=pd.DataFrame()
for i in range(len(df)):
if((df["actor1"][i]==s) or (df["actor2"][i]==s )or( df["actor3"][i]==s)):
df3[t]=df.iloc[i]
            df3 = pd.concat([df3, df.iloc[i]], axis=1)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.stats import t
mpl.rcParams["axes.labelsize"] = 28
mpl.rcParams['xtick.labelsize']= 20
mpl.rcParams['ytick.labelsize']= 20
def plot_hierarch(posterior, hist=False):
# Labels for plots
labels = [r'$\nu$', r'$\hat\mu$', r'$\hat\sigma$']
# Remove burn-in
    posterior_df = pd.DataFrame(posterior[50:], columns=labels)
# Copyright 2021 <NAME>, spideynolove @ GitHub
# See LICENSE for details.
__author__ = '<NAME> @spideynolove in GitHub'
__version__ = '0.0.1'
# mimic pro code
# from .technical import technical_indicators, moving_averages, pivot_points
import investpy as iv
import os
import numpy as np
import pandas as pd
import datetime
import re
from settings import *
from functools import reduce
from pprint import pprint
'''
# --------- investpy market folder path
equity_path = 'investpy/equitiesdata/'
crypto_path = 'investpy/cryptodata/'
'''
# today = datetime.date.today().strftime("%d/%m/%Y")
today = '19/08/2021'
def convert_date(date):
return date.strftime("%d/%m/%Y")
def calculate_stats(source=combine_path, periods=13,
quotes='cor_bond', interval='Daily'):
    df = pd.read_csv(source+f'{quotes}_{interval}.csv')
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
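# Quick example of the helper above: it returns everything after the marker,
# e.g. the product id that follows "default=" in a product URL (URL is made up).
assert stripID("https://shop.example/item?default=123", "default=") == "123"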
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
#
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
                title = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
                sale_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
    options = Options()  # start headless mode
    options.add_argument('--headless')  # work around a google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's data; merged into dfAll when switching pages
    dfAll = pd.DataFrame()  # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.july2017.co/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Per():
shop_id = 32
name = 'per'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.perdot.com.tw/categories/all?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cereal():
shop_id = 33
name = 'cereal'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cerealoutfit.com/new/page/" + str(p) + "/"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
try:
chrome.find_element_by_xpath(
"//button[@class='mfp-close']").click()
except:
pass
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/h3/a" % (i,)).text
if(title == ""):
i += 1
if(i == 25):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@data-loop='%i']" % (i,)).get_attribute('126-id')
pic_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//ins//bdi" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//del//bdi" % (i,)).text
ori_price = ori_price.rstrip(' NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[2]//span[@class='woocommerce-Price-amount amount']" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Jcjc():
shop_id = 35
name = 'jcjc'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.jcjc-dailywear.com/collections/in-stock?limit=24&page=" + \
str(p) + "&sort=featured"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a[1][@href]" % (i,)).get_attribute('href')
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/span/a/img" % (i,)).get_attribute('src')
page_id = pic_link[pic_link.find("i/")+2:pic_link.find(".j")]
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/s/span" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Ccshop():
shop_id = 36
name = 'ccshop'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.ccjshop.com/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Iris():
shop_id = 37
name = 'iris'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.irisgarden.com.tw/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='boxify-item product-item ng-isolate-scope'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nook():
shop_id = 39
name = 'nook'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.nooknook.me/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Greenpea():
shop_id = 40
name = 'greenpea'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.greenpea-tw.com/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
                    i += 1
                    if(i == 25):
                        p += 1
                    continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Queen():
shop_id = 42
name = 'queen'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.queenshop.com.tw/zh-TW/QueenShop/ProductList?item1=01&item2=all&Page=" + \
str(p) + "&View=4"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "SaleID=")
pic_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/img[1]" % (i,)).get_attribute('data-src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cozyfee():
shop_id = 48
name = 'cozyfee'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cozyfee.com/product.php?page=" + \
str(p) + "&cid=55#prod_list"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div[1]/a/img[1]" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[3]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Reishop():
shop_id = 49
name = 'reishop'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.reishop.com.tw/pdlist2.asp?item1=all&item2=&item3=&keyword=&ob=A&pagex=&pageno=" + \
str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 31):
try:
title = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("yano=YA")
page_id = page_id.replace("&color=", "")
pic_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span/img[1]" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 31):
p += 1
continue
i += 1
if(i == 31):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Yourz():
shop_id = 50
name = 'yourz'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.yourz.com.tw/product/category/34/1/" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 13):
try:
title = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/font" % (i,)).text
sale_price = sale_price.replace('VIP價:NT$ ', '')
sale_price = sale_price.rstrip('元')
ori_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/br" % (i,)).text
ori_price = ori_price.replace('NT$ ', '')
ori_price = ori_price.rstrip('元')
except:
i += 1
if(i == 13):
p += 1
continue
i += 1
if(i == 13):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Seoulmate():
shop_id = 54
name = 'seoulmate'
    options = Options() # start Chrome in headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary holder for the current page, merged into dfAll
    dfAll = pd.DataFrame() # stores all scraped data
import time
from typing import List, Tuple, Dict, Optional
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from .helpers import Float, Frame, Rebalance
from .assets import AssetList
class EfficientFrontierReb(AssetList):
"""
Efficient Frontier (EF) for rebalanced portfolios.
Rebalancing periods could be:
'Y' - one Year (default)
'N' - not rebalanced portfolios
Asset labels are set with 'tickers':
True - for tickers
False - for full asset names
TODO: Add bounds
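    Example (ticker symbols and keyword values below are illustrative only):
        frontier = EfficientFrontierReb(['SPY.US', 'GLD.US'],
                                        ccy='USD', reb_period='Y', n_points=20)
        ef = frontier.ef_points  # DataFrame with Risk, CAGR and weight columns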
"""
def __init__(self,
symbols: List[str], *,
first_date: str = None,
last_date: str = None,
ccy: str = 'USD',
inflation: bool = True,
reb_period: str = 'Y',
n_points: int = 20,
verbose: bool = False,
tickers: bool = True,
):
if len(symbols) < 2:
raise ValueError('The number of symbols cannot be less than two')
super().__init__(symbols=symbols, first_date=first_date, last_date=last_date, ccy=ccy, inflation=inflation)
# above we already stated reb_period: str, n_points: int, tickers: bool, verbose: bool
self.reb_period: str = reb_period
self.n_points: int = n_points
        # at first glance a reader would expect tickers to be a list (like symbols), not a bool
self.tickers: bool = tickers
self.verbose: bool = verbose
@property
def n_points(self):
return self._n_points
@n_points.setter
def n_points(self, n_points: int):
# already checked in signature
if not isinstance(n_points, int):
raise ValueError('n_points should be an integer')
self._n_points = n_points
@property
def reb_period(self):
return self._reb_period
@reb_period.setter
def reb_period(self, reb_period: str):
# Y/N is Yes/No for common sense
if reb_period not in ['Y', 'N']:
raise ValueError('reb_period: Rebalancing period should be "Y" - year or "N" - not rebalanced.')
self._reb_period = reb_period
@property
def tickers(self):
return self._tickers
@tickers.setter
def tickers(self, tickers: bool):
# already checked in signature
if not isinstance(tickers, bool):
raise ValueError('tickers should be True or False')
self._tickers = tickers
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose: bool):
if not isinstance(verbose, bool):
raise ValueError('verbose should be True or False')
self._verbose = verbose
@property
def gmv_monthly_weights(self) -> np.ndarray:
"""
Returns the weights of the Global Minimum Volatility portfolio with monthly values of risk / return
"""
ror = self.ror
period = self.reb_period
n = self.ror.shape[1]
init_guess = np.repeat(1 / n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# Set the objective function
def objective_function(w):
risk = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period).std()
return risk
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
weights = minimize(objective_function,
init_guess,
method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
@property
def gmv_annual_weights(self) -> np.ndarray:
"""
Returns the weights of the Global Minimum Volatility portfolio with annualized values of risk / return
"""
ror = self.ror
period = self.reb_period
n = self.ror.shape[1]
init_guess = np.repeat(1 / n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# Set the objective function
def objective_function(w):
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
mean_return = ts.mean()
risk = ts.std()
return Float.annualize_risk(risk=risk, mean_return=mean_return)
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
weights = minimize(objective_function,
init_guess,
method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
    def _get_gmv_monthly(self) -> Tuple[float, float]:
"""
Returns the risk and return (mean, monthly) of the Global Minimum Volatility portfolio
"""
gmv_monthly = (
Rebalance.rebalanced_portfolio_return_ts(self.gmv_monthly_weights, self.ror, period=self.reb_period).std(),
Rebalance.rebalanced_portfolio_return_ts(self.gmv_monthly_weights, self.ror, period=self.reb_period).mean()
)
return gmv_monthly
@property
    def gmv_annual_values(self) -> Tuple[float, float]:
"""
Returns the annual risk (std) and CAGR of the Global Minimum Volatility portfolio.
"""
returns = Rebalance.rebalanced_portfolio_return_ts(self.gmv_annual_weights, self.ror, period=self.reb_period)
# 12 -> _MONTHS_PER_YEAR?
gmv = (
Float.annualize_risk(returns.std(), returns.mean()),
(returns + 1.).prod()**(12/returns.shape[0]) - 1.
)
return gmv
@property
def max_return(self) -> dict:
"""
Returns the weights and risk / CAGR of the maximum return portfolio point.
"""
ror = self.ror
period = self.reb_period
n = self.ror.shape[1] # Number of assets
init_guess = np.repeat(1 / n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# Set the objective function
def objective_function(w):
# Accumulated return for rebalanced portfolio time series
objective_function.returns = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
accumulated_return = (objective_function.returns + 1.).prod() - 1.
return - accumulated_return
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
weights = minimize(objective_function,
init_guess,
method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
portfolio_ts = objective_function.returns
mean_return = portfolio_ts.mean()
portfolio_risk = portfolio_ts.std()
# 12 -> _MONTHS_PER_YEAR?
point = {
'Weights': weights.x,
'CAGR': (1 - weights.fun) ** (12 / self.ror.shape[0]) - 1,
'Risk': Float.annualize_risk(portfolio_risk, mean_return),
'Risk_monthly': portfolio_risk
}
return point
def minimize_risk(self, target_return: float) -> Dict[str, float]:
"""
Returns the optimal weights and risk / cagr values for a min risk at the target cagr.
"""
ror = self.ror
period = self.reb_period
n = ror.shape[1] # number of assets
init_guess = np.repeat(1 / n, n) # initial weights
def objective_function(w):
# annual risk
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
risk_monthly = ts.std()
mean_return = ts.mean()
result = Float.annualize_risk(risk_monthly, mean_return)
return result
def cagr(w):
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
acc_return = (ts + 1.).prod() - 1.
# 12 -> _MONTHS_PER_YEAR?
return (1. + acc_return)**(12 / ror.shape[0]) - 1.
# construct the constraints
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples for Weights constrains
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
cagr_is_target = {'type': 'eq',
'fun': lambda weights: target_return - cagr(weights)
}
weights = minimize(objective_function,
init_guess,
method='SLSQP',
options={'disp': False,
'maxiter': 100,
'ftol': 1e-06,
},
constraints=(weights_sum_to_1, cagr_is_target),
bounds=bounds)
# Calculate points of EF given optimal weights
if weights.success:
# maybe the check should be for self.symbols?
asset_labels = self.symbols if self.tickers else list(self.names.values())
point = {x: y for x, y in zip(asset_labels, weights.x)}
# mixed case naming can be confusing
point['CAGR'] = target_return
point['Risk'] = weights.fun
else:
raise Exception(f'There is no solution for target cagr {target_return}.')
return point
def _maximize_risk_trust_constr(self, target_return: float) -> Dict[str, float]:
"""
        Returns the optimal weights and risk / cagr values for a max risk at the target cagr.
"""
ror = self.ror
period = self.reb_period
n = ror.shape[1] # number of assets
init_guess = np.repeat(0, n)
init_guess[self.max_annual_risk_asset['list_position']] = 1.
risk_limit = self.gmv_annual_values[0]
def objective_function(w):
# - annual risk
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
risk_monthly = ts.std()
mean_return = ts.mean()
result = - Float.annualize_risk(risk_monthly, mean_return)
return result
def cagr(w):
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
acc_return = (ts + 1.).prod() - 1.
# 12 -> _MONTHS_PER_YEAR?
return (1. + acc_return)**(12 / ror.shape[0]) - 1.
# construct the constraints
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples for Weights constrains
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
cagr_is_target = {'type': 'eq',
'fun': lambda weights: target_return - cagr(weights)
}
risk_is_above = {'type': 'ineq',
'fun': lambda weights: - objective_function(weights) - risk_limit
}
weights = minimize(objective_function,
init_guess,
method='trust-constr',
options={'disp': False,
'gtol': 1e-6,
'xtol': 1e-8,
# 'barrier_tol': 1e-01,
'maxiter': 100,
'factorization_method': 'QRFactorization',
'verbose': 0,
},
constraints=(weights_sum_to_1, cagr_is_target, risk_is_above),
bounds=bounds)
# Calculate points of EF given optimal weights
if weights.success:
if not self.tickers:
asset_labels = list(self.names.values())
else:
asset_labels = self.symbols
point = {x: y for x, y in zip(asset_labels, weights.x)}
point['CAGR'] = target_return
point['Risk'] = - weights.fun
else:
raise Exception(f'There is no solution for target cagr {target_return}.')
return point
def maximize_risk(self, target_return: float) -> Dict[str, float]:
"""
        Returns the optimal weights and risk / cagr values for a max risk at the target cagr.
"""
ror = self.ror
period = self.reb_period
n = ror.shape[1] # number of assets
init_guess = np.repeat(0, n)
init_guess[self.max_cagr_asset_right_to_max_cagr['list_position']] = 1.
def objective_function(w):
# annual risk
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
risk_monthly = ts.std()
mean_return = ts.mean()
result = - Float.annualize_risk(risk_monthly, mean_return)
return result
def cagr(w):
ts = Rebalance.rebalanced_portfolio_return_ts(w, ror, period=period)
acc_return = (ts + 1.).prod() - 1.
# 12 -> _MONTHS_PER_YEAR?
return (1. + acc_return)**(12 / ror.shape[0]) - 1.
# construct the constraints
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples for Weights constrains
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
cagr_is_target = {'type': 'eq',
'fun': lambda weights: target_return - cagr(weights)
}
weights = minimize(objective_function,
init_guess,
method='SLSQP',
options={'disp': False,
'ftol': 1e-06,
'maxiter': 100,
},
constraints=(weights_sum_to_1, cagr_is_target),
bounds=bounds)
# Calculate points of EF given optimal weights
if weights.success:
if not self.tickers:
asset_labels = list(self.names.values())
else:
asset_labels = self.symbols
point = {x: y for x, y in zip(asset_labels, weights.x)}
point['CAGR'] = target_return
point['Risk'] = - weights.fun
else:
raise Exception(f'There is no solution for target cagr {target_return}.')
return point
@property
def target_cagr_range_left(self) -> np.ndarray:
"""
Full range of cagr values (from min to max).
"""
max_cagr = self.max_return['CAGR']
min_cagr = Frame.get_cagr(self.ror).min()
target_range = np.linspace(min_cagr, max_cagr, self.n_points)
return target_range
@property
def max_cagr_asset(self):
max_asset_cagr = Frame.get_cagr(self.ror).max()
ticker_with_largest_cagr = Frame.get_cagr(self.ror).nlargest(1, keep='first').index.values[0]
return {'max_asset_cagr': max_asset_cagr,
'ticker_with_largest_cagr': ticker_with_largest_cagr,
'list_position': self.symbols.index(ticker_with_largest_cagr)
}
@property
def max_cagr_asset_right_to_max_cagr(self) -> Optional[dict]:
"""
        The asset with max CAGR lying to the right of the max CAGR point (risk is greater than self.max_return['Risk']).
Max return point should not be an asset.
"""
tolerance = 0.01 # assets CAGR should be less than max CAGR with certain tolerance
max_cagr_is_not_asset = (self.get_cagr() < self.max_return['CAGR'] * (1 - tolerance)).all()
if max_cagr_is_not_asset:
condition = self.risk_annual.values > self.max_return['Risk']
ror_selected = self.ror.loc[:, condition]
if not ror_selected.empty:
cagr_selected = Frame.get_cagr(ror_selected)
max_asset_cagr = cagr_selected.max()
ticker_with_largest_cagr = cagr_selected.nlargest(1, keep='first').index.values[0]
return {'max_asset_cagr': max_asset_cagr,
'ticker_with_largest_cagr': ticker_with_largest_cagr,
'list_position': self.symbols.index(ticker_with_largest_cagr)
}
@property
def max_annual_risk_asset(self):
max_risk = self.risk_annual.max()
ticker_with_largest_risk = self.risk_annual.nlargest(1, keep='first').index.values[0]
return {'max_annual_risk': max_risk,
'ticker_with_largest_risk': ticker_with_largest_risk,
'list_position': self.symbols.index(ticker_with_largest_risk)
}
@property
def target_cagr_range_right(self) -> Optional[np.ndarray]:
"""
Range of cagr values from the global CAGR max to the max asset cagr
to the right of the max CAGR point (if exists).
"""
if self.max_cagr_asset_right_to_max_cagr:
ticker_cagr = self.max_cagr_asset_right_to_max_cagr['max_asset_cagr']
max_cagr = self.max_return['CAGR']
if not np.isclose(max_cagr, ticker_cagr, rtol=1e-3, atol=1e-05):
k = abs((self.target_cagr_range_left[0] - self.target_cagr_range_left[-1]) / (max_cagr - ticker_cagr))
number_of_points = round(self.n_points / k) + 1
target_range = np.linspace(max_cagr, ticker_cagr, number_of_points)
return target_range[1:] # skip the first point (max cagr) as it presents in the left part of the EF
@property
def target_risk_range(self) -> np.ndarray:
"""
Range of annual risk values (from min risk to max risk).
"""
min_std = self.gmv_annual_values[0]
ticker_with_largest_risk = self.ror.std().nlargest(1, keep='first').index.values[0]
max_std_monthly = self.ror.std().max()
mean_return = self.ror.loc[:, ticker_with_largest_risk].mean()
max_std = Float.annualize_risk(max_std_monthly, mean_return)
target_range = np.linspace(min_std, max_std, self.n_points)
return target_range
@property
def ef_points(self) -> pd.DataFrame:
"""
Returns a DataFrame of points for Efficient Frontier when the Objective Function is the risk (std)
for rebalanced portfolio.
Each point has:
- Weights (float)
- CAGR (float)
- Risk (float)
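        Illustrative layout of the result (asset names and numbers are invented):
            Risk     CAGR     asset_1  asset_2
            0.084    0.062    0.55     0.45
        where the asset columns hold the optimal weights for each point.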
"""
main_start_time = time.time()
df = pd.DataFrame()
# left part of the EF
for i, target_cagr in enumerate(self.target_cagr_range_left):
start_time = time.time()
row = self.minimize_risk(target_cagr)
df = df.append(row, ignore_index=True)
end_time = time.time()
if self.verbose:
print(f"left EF point #{i + 1}/{self.n_points} is done in {end_time - start_time:.2f} sec.")
# right part of the EF
range_right = self.target_cagr_range_right
        if range_right is not None:  # range_right is an ndarray or None, so an explicit "is not None" check is needed
n = len(range_right)
for i, target_cagr in enumerate(range_right):
start_time = time.time()
row = self.maximize_risk(target_cagr)
df = df.append(row, ignore_index=True)
end_time = time.time()
if self.verbose:
print(f"right EF point #{i + 1}/{n} is done in {end_time - start_time:.2f} sec.")
df = Frame.change_columns_order(df, ['Risk', 'CAGR'])
main_end_time = time.time()
if self.verbose:
print(f"Total time taken is {(main_end_time - main_start_time) / 60:.2f} min.")
return df
def get_monte_carlo(self, n: int = 100) -> pd.DataFrame:
"""
        Generate N random risk / cagr points for rebalanced portfolios.
Risk and cagr are calculated for a set of random weights.
"""
weights_df = Float.get_random_weights(n, self.ror.shape[1])
# Portfolio risk and cagr for each set of weights
portfolios_ror = weights_df.aggregate(Rebalance.rebalanced_portfolio_return_ts, ror=self.ror, period=self.reb_period)
        random_portfolios = pd.DataFrame()
#!/usr/bin/env python3
# make GTF file that includes the ORF regions (as CDS features)
# input - pacbio gtf ('jurkat.collapsed.gtf'), orf calls ('jurkat_refine_orf_calls.tsv')
# output - pacbio gtf with added "cds" features (orfs)
# %%
import pandas as pd
import numpy as np
from collections import defaultdict
import copy
import argparse
import gtfparse
import logging
logger = logging.getLogger('cds_logger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('make_pacbio_cds.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
def string_to_boolean(string):
"""
Converts string to boolean
Parameters
----------
string :str
input string
Returns
----------
result : bool
output boolean
"""
if isinstance(string, bool):
        return string
if string.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif string.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_first_block_index(orf_coord, cblens, pblens):
# get the index corresponding to the first block containing the orf start
    # return index, and the delta (spacing upstream of the block end)
for i, cblen in enumerate(cblens):
if orf_coord <= cblen:
delta = cblen - orf_coord
return i, delta
logger.warning(f"ORF COORDINATE IS NOT FOUND WITHIN BLOCKS")
return i, 0
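# Worked example for get_first_block_index (numbers are invented): with
# cblens = [50, 120, 200] and orf_coord = 60, the loop stops at i = 1
# (60 <= 120) and returns (1, 60), i.e. the ORF start falls in the second
# exon block, 60 bases upstream of that block's end.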
def make_cds_coords_positive_strand(i1, delta1, i2, delta2, coords):
orf_coords = copy.deepcopy(coords)
orf_coords = orf_coords[i1: i2+1]
# trim ends to orf start/end
orf_coords[0][0] = orf_coords[0][1] - delta1
orf_coords[-1][1] = orf_coords[-1][1] - delta2
return orf_coords
def make_cds_coords_negative_strand(i1, delta1, i2, delta2, coords):
orf_coords = copy.deepcopy(coords)
orf_coords = orf_coords[i1: i2+1]
# trim ends to orf start/end
orf_coords[0][1] = orf_coords[0][0] + delta1
orf_coords[-1][0] = orf_coords[-1][0] + delta2
return orf_coords
# def get_first_block_index(orf_coord, cblens, pblens):
# """
# Finds the index of the block where orf_coord location is and the relative
# difference (delta) within the block the coordinate is
# Args:
# orf_coord (int): orf coordinate location
# cblens (numpy 1D array) : cumulative block lengths
# pblens (numpy 1D array ) : prior cumulative block lengths
# Returns:
# i (int): index of orf location
# delta (int): difference within block orf_coord starts
# """
# # get the index corresponding to the first block containing the orf start
# # return index, and the dela (spacing upstream of end)
# # logger.info(f"orf_coord {orf_coord} \ncblens {cblens}")
# for i, (cblen, pblen) in enumerate(zip(cblens, pblens)):
# if orf_coord <= cblen:
# delta = orf_coord - pblen
# return i, delta
# logging.WARNING(f"ORF COORDINATE IS NOT FOUND WITHIN BLOCKS")
# return i, cblen - pblen
# def make_cds_coords_positive_strand(i1, delta1, i2, delta2, coords):
# """
# Makes the CDS coordinates for the positive strand
# Args:
# i1 (int): index of first CDS exon
# delta1 (int) : offset from start of first exon to make CDS
# i2 (int) : index of last CDS exon
# delta2 (int) : delta of end from start - length of last CDS exon
# coords [[int,int]] : coordinates of exons
# Returns:
# [[int,int]]: CDS coordinates
# """
# logger.info(f"\n+\n{i1}\t{delta1}\n{i2}\t{delta2}\n{coords}")
# orf_coords = copy.deepcopy(coords)
# orf_coords = orf_coords[i1: i2+1]
# # trim ends to orf start/end
# orf_coords[0][0] = orf_coords[0][0] + delta1
# orf_coords[-1][1] = orf_coords[-1][0] + delta2
# logger.info(f"\n{orf_coords}")
# return orf_coords
# def make_cds_coords_negative_strand(i1, delta1, i2, delta2, coords):
# """Makes the CDS coordinates for the negative strand
# Args:
# i1 (int): index of start CDS exon
# delta1 (int): offset from end of first exon to make CDS
# i2 ([type]): index of last CDS exon
# delta2 ([type]): offset from last CDS exon end as start location
# coords ([[int,int]]): coordinates of exons
# Returns:
# [[int,int]]: CDS coordinates
# """
# logger.info(f"\n-\n{i1}\t{delta1}\n{i2}\t{delta2}\n{coords}")
# orf_coords = copy.deepcopy(coords)
# orf_coords = orf_coords[i1: i2+1]
# # trim ends to orf start/end
# orf_coords[0][1] = orf_coords[0][1] - delta1 + 2
# orf_coords[-1][0] = orf_coords[-1][1] - delta2 + 2
# logger.info(f"\n{orf_coords}")
# return orf_coords
def ceiling_cpm(cpm, ceiling = 1000):
"""Sets CPM to have ceiling
Args:
cpm (float): CPM of isoform
ceiling (int, optional): Maximum. Defaults to 1000.
Returns:
float: new cpm constrained to ceiling
"""
# gtf top score is 1000
if cpm > ceiling:
return ceiling
else:
return cpm
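# quick example of the ceiling behaviour: ceiling_cpm(1500.0) -> 1000, ceiling_cpm(12.3) -> 12.3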
#%%
def get_min_and_max_coords_from_exon_chain(coords):
"""Gets the minumum and maximum coordinates from exon chain
Args:
coords ([[int, int]]): [[start,end]] exon coordinate chain
Returns:
(int, int): start and end coordinates of entire chain
"""
min_coord = min(map(min, coords))
max_coord = max(map(max, coords))
return min_coord, max_coord
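# e.g. get_min_and_max_coords_from_exon_chain([[200, 270], [100, 150]]) -> (100, 270)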
#%%
def make_pacbio_cds_gtf(sample_gtf, refined_orfs, called_orfs, pb_gene, name, include_transcript):
"""Makes PacBio CDS and saves file with CDS
Args:
sample_gtf (filename): sample_gtf file
refined_orfs (filename): aggregate_orf info. from Refined DB
called_orfs (filename): orf calls from ORF_Calling
pb_gene (filename): PacBio gene name cross reference
name (string): name of sample
include_transcript (bool): whether to include transcript in saved gtf file
"""
refined_db = pd.read_table(refined_orfs)
representative_accessions = refined_db['base_acc'].to_list()
# import gtf, only exon info.
# only move forward with representative pb isoform (for same-protein groups)
gtf = gtfparse.read_gtf(sample_gtf)
gtf = gtf[['seqname', 'feature', 'start', 'end', 'strand', 'transcript_id']]
gtf = gtf[gtf['feature'] == 'exon']
gtf.columns = ['chr', 'feat', 'start', 'end', 'strand', 'acc']
# only move forward with "base accession" (representative pb)
gtf = gtf[gtf.acc.isin(representative_accessions)]
# pb coords into dict
pbs = defaultdict(lambda: ['chr', 'strand', [], [], [],[]]) # pb -> [chr, strand, [start, end], [block lengths],[cum. block lengths], [prior cumulative block lengths]]
# PB.1.1 -> ['chr1', '+', [[100,150], [200,270]], [50, 70], [50, 120], [150-200]]
for i, row in gtf.iterrows():
chr, feat, start, end, strand, acc = row
pbs[acc][0] = chr
pbs[acc][1] = strand
pbs[acc][2].append([int(start), int(end)])
# sort all coords, calc blocks
for acc, infos in pbs.items():
strand = infos[1]
if strand == '+':
infos[2] = sorted(infos[2])
elif strand == '-':
infos[2] = sorted(infos[2], reverse=True)
infos[3] = np.array([end-start+1 for [start, end] in infos[2]])
infos[4] = np.cumsum(infos[3])
infos[5] = infos[4] - infos[3]
# read in the ranges of orf on pb transcripts
ranges = pd.read_table(called_orfs)[['pb_acc', 'orf_start', 'orf_end']]
ranges = pd.merge(
ranges, refined_db[['base_acc', 'CPM']],
left_on='pb_acc',
right_on='base_acc',
how='inner')
ranges = ranges[['pb_acc', 'orf_start', 'orf_end', 'CPM']]
# ranges = ranges[ranges['pb_acc'].isin(representative_accessions)]
# read in pb to genename
    pb_gene = pd.read_table(pb_gene)
import pandas as pd
import os, re, fnmatch, subprocess
from collections import defaultdict
from shlex import quote
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.igfdb.igfTables import Experiment, Run
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.experimentadaptor import ExperimentAdaptor
from igf_data.igfdb.runadaptor import RunAdaptor
from igf_data.igfdb.collectionadaptor import CollectionAdaptor
from igf_data.igfdb.fileadaptor import FileAdaptor
from igf_data.utils.fileutils import calculate_file_checksum
class Collect_seqrun_fastq_to_db:
'''
A class for collecting raw fastq files after demultiplexing and storing them in database.
Additionally this will also create relevant entries for the experiment and run tables in database
:param fastq_dir: A directory path for file look up
:param model_name: Sequencing platform information
:param seqrun_igf_id: Sequencing run name
:param session_class: A database session class
:param flowcell_id: Flowcell information for the run
:param samplesheet_file: Samplesheet filepath
:param samplesheet_filename: Name of the samplesheet file, default SampleSheet.csv
:param collection_type: Collection type information for new fastq files, default demultiplexed_fastq
:param file_location: Fastq file location information, default HPC_PROJECT
:param collection_table: Collection table information for fastq files, default run
:param manifest_name: Name of the file manifest file, default file_manifest.csv
:param singlecell_tag: Samplesheet description for singlecell samples, default 10X
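  :Example (paths and identifiers below are illustrative only)::

    collector = \
      Collect_seqrun_fastq_to_db(
        fastq_dir='/path/to/demultiplexed_fastq',
        model_name='HISEQ4000',
        seqrun_igf_id='180101_K00001_0001_AHXXXXXXX',
        session_class=session_class,
        flowcell_id='HXXXXXXX')
    collector.find_fastq_and_build_db_collection()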
'''
def __init__(
self,fastq_dir,model_name,seqrun_igf_id,session_class,flowcell_id,
samplesheet_file=None,samplesheet_filename='SampleSheet.csv',
collection_type='demultiplexed_fastq',file_location='HPC_PROJECT',
collection_table='run', manifest_name='file_manifest.csv',singlecell_tag='10X'):
self.fastq_dir = fastq_dir
self.samplesheet_file = samplesheet_file
self.samplesheet_filename = samplesheet_filename
self.seqrun_igf_id = seqrun_igf_id
self.model_name = model_name
self.session_class = session_class
self.collection_type = collection_type
self.file_location = file_location
self.flowcell_id = flowcell_id
self.collection_table = collection_table
self.manifest_name = manifest_name
self.singlecell_tag = singlecell_tag
def find_fastq_and_build_db_collection(self):
'''
A method for finding fastq files and samplesheet under a run directory
and loading the new files to db with their experiment and run information
It calculates following entries
* library_name
Same as sample_id unless mentioned in 'Description' field of samplesheet
* experiment_igf_id
library_name combined with the platform name
same library sequenced in different platform will be added as separate experiemnt
* run_igf_id
experiment_igf_id combined with sequencing flowcell_id and lane_id
collection name: Same as run_igf_id, fastq files will be added to db collection
using this id
* collection type
Default type for fastq file collections are 'demultiplexed_fastq'
* file_location
Default value is 'HPC_PROJECT'
'''
try:
fastq_files_list = \
self._collect_fastq_and_sample_info()
self._build_and_store_exp_run_and_collection_in_db(
fastq_files_list=fastq_files_list)
except Exception as e:
raise ValueError(
'Failed to find fastq and build collection, error: {0}'.\
format(e))
def _get_fastq_and_samplesheet(self):
try:
fastq_dir = self.fastq_dir
samplesheet_file = self.samplesheet_file
samplesheet_filename = self.samplesheet_filename
r1_fastq_regex = \
re.compile(r'\S+_R1_\d+\.fastq(\.gz)?', re.IGNORECASE)
r2_fastq_regex = \
re.compile(r'\S+_R2_\d+\.fastq(\.gz)?', re.IGNORECASE)
samplesheet_list = list()
r1_fastq_list = list()
r2_fastq_list = list()
if os.path.isdir(fastq_dir):
for root, _, files in os.walk(top=fastq_dir):
if samplesheet_filename in files:
samplesheet_list.append(
os.path.join(root,samplesheet_filename))
for file in files:
if not fnmatch.fnmatch(file, 'Undetermined_'):
if r1_fastq_regex.match(file):
r1_fastq_list.\
append(os.path.join(root,file))
elif r2_fastq_regex.match(file):
r2_fastq_list.\
append(os.path.join(root,file))
if len(r2_fastq_list) > 0 and \
len(r1_fastq_list) != len(r2_fastq_list):
raise ValueError(
'R1 {0} and R2 {1}'.format(
len(r1_fastq_list),
len(r2_fastq_list)))
if samplesheet_file is None and \
len(samplesheet_list)==1:
self.samplesheet_file = samplesheet_list[0] # set samplesheet file name
if len(samplesheet_list) > 1:
raise ValueError(
'Found more than one samplesheet file for fastq dir {0}'.\
format(fastq_dir))
if samplesheet_file is None and \
len(samplesheet_list)==0:
raise ValueError(
'No samplesheet file for fastq dir {0}'.\
format(fastq_dir))
elif os.path.isfile(fastq_dir):
if samplesheet_file is None:
raise ValueError(
'Missing samplesheet file for fastq file {0}'.\
format(fastq_dir))
        file = os.path.basename(fastq_dir)  # single fastq file case
        if not fnmatch.fnmatch(file, 'Undetermined_'):
          if r1_fastq_regex.match(file):
            r1_fastq_list.\
              append(fastq_dir)
          elif r2_fastq_regex.match(file):
            r2_fastq_list.\
              append(fastq_dir)
return r1_fastq_list, r2_fastq_list
except Exception as e:
raise ValueError(
'Failed to get fastq and samplesheet, error: {0}'.\
format(e))
@staticmethod
def _link_fastq_file_to_sample(sample_name,r1_fastq_list, r2_fastq_list):
try:
sample_files = \
defaultdict(lambda: defaultdict(lambda: defaultdict()))
r1_regex = \
re.compile(
sample_name+'_S\d+_L(\d+)_R1_\d+\.fastq(\.gz)?',
re.IGNORECASE)
for file1 in r1_fastq_list:
if r1_regex.match(os.path.basename(file1)):
m = r1_regex.match(os.path.basename(file1))
lane_id = m.group(1).strip('0')
sample_files[lane_id]['R1'] = file1
if len(r2_fastq_list) > 0:
r2_regex = \
re.compile(
sample_name+'_S\d+_L(\d+)_R2_\d+\.fastq(\.gz)?',
re.IGNORECASE)
for file2 in r2_fastq_list:
if r2_regex.match(os.path.basename(file2)):
m = r2_regex.match(os.path.basename(file2))
lane_id = m.group(1).strip('0')
sample_files[lane_id]['R2'] = file2
return sample_files
except Exception as e:
raise ValueError(
'Failed to link fastq to sample, error: {0}'.format(e))
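  # Example of the structure returned by _link_fastq_file_to_sample (paths are illustrative):
  #   _link_fastq_file_to_sample('SampleA',
  #     ['/run/SampleA_S1_L001_R1_001.fastq.gz'],
  #     ['/run/SampleA_S1_L001_R2_001.fastq.gz'])
  #   -> {'1': {'R1': '/run/SampleA_S1_L001_R1_001.fastq.gz',
  #             'R2': '/run/SampleA_S1_L001_R2_001.fastq.gz'}}
  # (the lane string '001' collapses to '1' via strip('0'))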
def _collect_fastq_and_sample_info(self):
'''
An internal method for collecting fastq and sample info
'''
try:
seqrun_igf_id = self.seqrun_igf_id
model_name = self.model_name
flowcell_id = self.flowcell_id
(r1_fastq_list, r2_fastq_list) = \
self._get_fastq_and_samplesheet()
samplesheet_file = self.samplesheet_file
final_data = list()
samplesheet_sc = \
SampleSheet(infile=samplesheet_file) # read samplesheet for single cell check
samplesheet_sc.\
filter_sample_data(
condition_key='Description',
condition_value=self.singlecell_tag,
method='include') # keep only single cell samples
if len(samplesheet_sc._data) >0:
sc_new_data = \
          pd.DataFrame(samplesheet_sc._data)
import os
import multiprocessing
import time
from datetime import datetime
from pprint import pprint
from pathlib import Path
import argparse
import pandas as pd
from pytablewriter import MarkdownTableWriter
from .test_aspirin import test_aspirin
from .test_cockroaches import test_cockroaches
from .test_coin import test_coin
from .test_double_gaussian import test_double_gaussian
from .test_gaussian_log_density import test_gaussian_log_density
from .test_linear_regression import test_linear_regression
from .test_neal_funnel import test_neal_funnel
from .test_schools import test_schools
from .test_seeds import test_seeds
from .harness import Config
tests = [
test_aspirin,
test_cockroaches,
test_coin,
test_double_gaussian,
test_gaussian_log_density,
test_linear_regression,
test_neal_funnel,
test_schools,
test_seeds,
]
name_map = {
"coin": "coin",
"double_gaussian": "double normal",
"gaussian_log_density": "gaussian target",
"neal_funnel": "reparameterization",
"linear_regression": "linear regression",
"aspirin": "aspirin",
"cockroaches": "roaches",
"schools": "8 schools",
"seeds": "seeds",
}
def run(exp):
f, i, config, logdir = exp
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
name = f.__name__
Path(logdir).mkdir(parents=True, exist_ok=True)
filename = os.path.join(logdir, f"{name}_{i}_{now}.log")
try:
results = f(config)
with open(filename, "w") as file:
pprint(results, file)
except (KeyboardInterrupt, SystemExit):
raise
except:
print(f"ERROR test {name}")
raise
def run_all(config=Config(), logdir="logs", n_runs=5):
experiments = [(t, i, config, logdir) for t in tests for i in range(n_runs)]
n_cpu = multiprocessing.cpu_count()
pool = multiprocessing.Pool(n_cpu)
pool.map(run, experiments)
pool.close()
pool.join()
def flatten_scalar(d):
res = {}
for k, v in d.items():
if set(v.keys()) == {"statistic", "pvalue"}: # leaf values
res[k] = v["pvalue"]
else:
for kk, vv in flatten_scalar(v).items():
res[f"{k}[{kk}]"] = vv
return res
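# Example (illustrative; the nested dict mirrors the per-parameter ks-test
# structure consumed below, the numbers are made up):
#
#   >>> flatten_scalar({"mu": {"statistic": 0.1, "pvalue": 0.8},
#   ...                 "theta": {"a": {"statistic": 0.2, "pvalue": 0.5}}})
#   {'mu': 0.8, 'theta[a]': 0.5}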
def clean_log(f):
with open(f, "r") as log_file:
raw = log_file.read()
log = eval(raw)
res = {}
res["stan"] = {
"compilation": log["timers"]["Stan_Compilation"],
"runtime": log["timers"]["Stan_Runtime"],
}
if "numpyro" in log["divergences"]:
res["numpyro"] = flatten_scalar(log["divergences"]["numpyro"]["ks"])
if "Numpyro_Compilation" in log["timers"]:
res["numpyro"]["compilation"] = log["timers"]["Numpyro_Compilation"]
res["numpyro"]["runtime"] = log["timers"]["Numpyro_Runtime"]
if "pyro" in log["divergences"]:
res["pyro"] = flatten_scalar(log["divergences"]["pyro"]["ks"])
if "Pyro_Compilation" in log["timers"]:
res["pyro"]["compilation"] = log["timers"]["Pyro_Compilation"]
res["pyro"]["runtime"] = log["timers"]["Pyro_Runtime"]
if (
"numpyro_naive" in log["divergences"]
and log["divergences"]["numpyro_naive"]
):
res["numpyro_naive"] = flatten_scalar(
log["divergences"]["numpyro_naive"]["ks"]
)
res["numpyro_naive"]["runtime"] = log["timers"]["Numpyro_naive_Runtime"]
if "pyro_naive" in log["divergences"] and log["divergences"]["pyro_naive"]:
res["pyro_naive"] = flatten_scalar(log["divergences"]["pyro_naive"]["ks"])
res["pyro_naive"]["runtime"] = log["timers"]["Pyro_naive_Runtime"]
return res
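# Example of the shape clean_log returns (illustrative; the file name and the
# numbers are made up, the keys follow the lookups above):
#
#   >>> clean_log("logs/test_coin_0_2021_01_01_00_00_00.log")
#   {'stan': {'compilation': 42.1, 'runtime': 3.2},
#    'numpyro': {'mu': 0.73, 'compilation': 12.0, 'runtime': 1.9}}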
def to_frame(dirname):
summary = {}
for x in name_map:
summary[x] = {}
logs = [
clean_log(os.path.join(dirname, f))
for f in os.listdir(dirname)
if f.startswith(f"test_{x}")
]
data = {}
for k in logs[0].keys():
data[k] = pd.DataFrame([pd.Series(d[k]) for d in logs])
summary[x] = pd.Series(data)
    return pd.Series(summary)
# standard library imports
import os
import datetime
import re
import math
import copy
import collections
from functools import wraps
from itertools import combinations
import warnings
import pytz
import importlib
# anaconda distribution defaults
import dateutil
import numpy as np
import pandas as pd
# anaconda distribution defaults
# statistics and machine learning imports
import statsmodels.formula.api as smf
from scipy import stats
# from sklearn.covariance import EllipticEnvelope
import sklearn.covariance as sk_cv
# anaconda distribution defaults
# visualization library imports
import matplotlib.pyplot as plt
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.palettes import Category10, Category20c, Category20b
from bokeh.layouts import gridplot
from bokeh.models import Legend, HoverTool, tools, ColumnDataSource
# visualization library imports
hv_spec = importlib.util.find_spec('holoviews')
if hv_spec is not None:
import holoviews as hv
from holoviews.plotting.links import DataLink
else:
warnings.warn('Some plotting functions will not work without the '
'holoviews package.')
# pvlib imports
pvlib_spec = importlib.util.find_spec('pvlib')
if pvlib_spec is not None:
from pvlib.location import Location
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.pvsystem import retrieve_sam
from pvlib.modelchain import ModelChain
from pvlib.clearsky import detect_clearsky
else:
warnings.warn('Clear sky functions will not work without the '
'pvlib package.')
plot_colors_brewer = {'real_pwr': ['#2b8cbe', '#7bccc4', '#bae4bc', '#f0f9e8'],
'irr-poa': ['#e31a1c', '#fd8d3c', '#fecc5c', '#ffffb2'],
'irr-ghi': ['#91003f', '#e7298a', '#c994c7', '#e7e1ef'],
'temp-amb': ['#238443', '#78c679', '#c2e699', '#ffffcc'],
'temp-mod': ['#88419d', '#8c96c6', '#b3cde3', '#edf8fb'],
'wind': ['#238b45', '#66c2a4', '#b2e2e2', '#edf8fb']}
met_keys = ['poa', 't_amb', 'w_vel', 'power']
# The search strings for types cannot be duplicated across types.
type_defs = collections.OrderedDict([
('irr', [['irradiance', 'irr', 'plane of array', 'poa', 'ghi',
'global', 'glob', 'w/m^2', 'w/m2', 'w/m', 'w/'],
(-10, 1500)]),
('temp', [['temperature', 'temp', 'degrees', 'deg', 'ambient',
'amb', 'cell temperature', 'TArray'],
(-49, 127)]),
('wind', [['wind', 'speed'],
(0, 18)]),
('pf', [['power factor', 'factor', 'pf'],
(-1, 1)]),
('op_state', [['operating state', 'state', 'op', 'status'],
(0, 10)]),
('real_pwr', [['real power', 'ac power', 'e_grid'],
(-1000000, 1000000000000)]), # set to very lax bounds
('shade', [['fshdbm', 'shd', 'shade'], (0, 1)]),
('pvsyt_losses', [['IL Pmax', 'IL Pmin', 'IL Vmax', 'IL Vmin'],
(-1000000000, 100000000)]),
('index', [['index'], ('', 'z')])])
sub_type_defs = collections.OrderedDict([
('ghi', [['sun2', 'global horizontal', 'ghi', 'global',
'GlobHor']]),
('poa', [['sun', 'plane of array', 'poa', 'GlobInc']]),
('amb', [['TempF', 'ambient', 'amb']]),
('mod', [['Temp1', 'module', 'mod', 'TArray']]),
('mtr', [['revenue meter', 'rev meter', 'billing meter', 'meter']]),
('inv', [['inverter', 'inv']])])
irr_sensors_defs = {'ref_cell': [['reference cell', 'reference', 'ref',
'referance', 'pvel']],
'pyran': [['pyranometer', 'pyran']],
'clear_sky':[['csky']]}
columns = ['pts_after_filter', 'pts_removed', 'filter_arguments']
def update_summary(func):
"""
Todo
----
not in place
Check if summary is updated when function is called with inplace=False.
It should not be.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
pts_before = self.df_flt.shape[0]
if pts_before == 0:
pts_before = self.df.shape[0]
self.summary_ix.append((self.name, 'count'))
self.summary.append({columns[0]: pts_before,
columns[1]: 0,
columns[2]: 'no filters'})
ret_val = func(self, *args, **kwargs)
arg_str = args.__repr__()
lst = arg_str.split(',')
arg_lst = [item.strip("'() ") for item in lst]
# arg_lst_one = arg_lst[0]
# if arg_lst_one == 'das' or arg_lst_one == 'sim':
# arg_lst = arg_lst[1:]
# arg_str = ', '.join(arg_lst)
kwarg_str = kwargs.__repr__()
kwarg_str = kwarg_str.strip('{}')
if len(arg_str) == 0 and len(kwarg_str) == 0:
arg_str = 'no arguments'
elif len(arg_str) == 0:
arg_str = kwarg_str
else:
arg_str = arg_str + ', ' + kwarg_str
pts_after = self.df_flt.shape[0]
pts_removed = pts_before - pts_after
self.summary_ix.append((self.name, func.__name__))
self.summary.append({columns[0]: pts_after,
columns[1]: pts_removed,
columns[2]: arg_str})
if pts_after == 0:
warnings.warn('The last filter removed all data! '
'Calling additional filtering or visualization '
'methods that reference the df_flt attribute will '
'raise an error.')
return ret_val
return wrapper
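# Example (illustrative sketch, not from the original source): update_summary is
# meant to decorate filtering methods of an object exposing `df`, `df_flt`,
# `summary`, `summary_ix` and `name` attributes, e.g.:
#
#   class CapData:                       # assumed minimal host class
#       def __init__(self, df):
#           self.name = 'das'
#           self.df = df
#           self.df_flt = df.copy()
#           self.summary = []
#           self.summary_ix = []
#
#       @update_summary
#       def filter_positive(self, column):
#           self.df_flt = self.df_flt[self.df_flt[column] > 0]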
def cntg_eoy(df, start, end):
"""
    Shifts data before or after new year to form a contiguous time period.
    This function shifts data from the end of the year a year back or data from
    the beginning of the year a year forward, to create a contiguous time period.
Intended to be used on historical typical year data.
If start date is in dataframe, then data at the beginning of the year will
be moved ahead one year. If end date is in dataframe, then data at the end
of the year will be moved back one year.
cntg (contiguous); eoy (end of year)
Parameters
----------
df: pandas DataFrame
Dataframe to be adjusted.
start: pandas Timestamp
Start date for time period.
end: pandas Timestamp
End date for time period.
Todo
----
Need to test and debug this for years not matching.
"""
if df.index[0].year == start.year:
df_beg = df.loc[start:, :]
df_end = df.copy()
df_end.index = df_end.index + pd.DateOffset(days=365)
df_end = df_end.loc[:end, :]
elif df.index[0].year == end.year:
df_end = df.loc[:end, :]
df_beg = df.copy()
df_beg.index = df_beg.index - pd.DateOffset(days=365)
df_beg = df_beg.loc[start:, :]
df_return = pd.concat([df_beg, df_end], axis=0)
ix_ser = df_return.index.to_series()
df_return['index'] = ix_ser.apply(lambda x: x.strftime('%m/%d/%Y %H %M'))
return df_return
def spans_year(start_date, end_date):
"""
    Returns True if the passed dates fall in different years, otherwise False.
Parameters
----------
start_date: pandas Timestamp
end_date: pandas Timestamp
"""
if start_date.year != end_date.year:
return True
else:
return False
def wrap_seasons(df, freq):
"""
Rearrange an 8760 so a quarterly groupby will result in seasonal groups.
Parameters
----------
df : DataFrame
Dataframe to be rearranged.
freq : str
        String pandas offset alias to specify aggregation frequency
for reporting condition calculation.
Returns
-------
DataFrame
Todo
----
Write unit test
BQ-NOV vs BQS vs QS
Need to review if BQ is the correct offset alias vs BQS or QS.
"""
check_freqs = ['BQ-JAN', 'BQ-FEB', 'BQ-APR', 'BQ-MAY', 'BQ-JUL',
'BQ-AUG', 'BQ-OCT', 'BQ-NOV']
mnth_int = {'JAN': 1, 'FEB': 2, 'APR': 4, 'MAY': 5, 'JUL': 7,
'AUG': 8, 'OCT': 10, 'NOV': 11}
if freq in check_freqs:
        warnings.warn('DataFrame index adjusted to be continuous through new '
                      'year, but not returned or set to attribute for user. '
                      'This is not an issue if using RCs with '
                      'predict_capacities.')
if isinstance(freq, str):
mnth = mnth_int[freq.split('-')[1]]
else:
mnth = freq.startingMonth
year = df.index[0].year
mnths_eoy = 12 - mnth
mnths_boy = 3 - mnths_eoy
if int(mnth) >= 10:
str_date = str(mnths_boy) + '/' + str(year)
else:
str_date = str(mnth) + '/' + str(year)
tdelta = df.index[1] - df.index[0]
date_to_offset = df.loc[str_date].index[-1].to_pydatetime()
start = date_to_offset + tdelta
end = date_to_offset + pd.DateOffset(years=1)
if mnth < 8 or mnth >= 10:
df = cntg_eoy(df, start, end)
else:
df = cntg_eoy(df, end, start)
return df
else:
return df
def perc_wrap(p):
def numpy_percentile(x):
return np.percentile(x.T, p, interpolation='nearest')
return numpy_percentile
def perc_bounds(perc):
"""
    perc : float or tuple
Percentage or tuple of percentages used to filter around reporting
irradiance in the irrRC_balanced function. Required argument when
irr_bal is True.
"""
if isinstance(perc, tuple):
perc_low = perc[0] / 100
perc_high = perc[1] / 100
else:
perc_low = perc / 100
perc_high = perc / 100
low = 1 - (perc_low)
high = 1 + (perc_high)
return (low, high)
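# Examples (illustrative, not from the original source; `df` in the last line is
# an assumed dataframe with a 'GlobInc' irradiance column):
#
#   >>> perc_bounds(20)
#   (0.8, 1.2)
#   >>> perc_bounds((10, 30))
#   (0.9, 1.3)
#   >>> df['GlobInc'].agg(perc_wrap(75))   # nearest-rank 75th percentile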
def perc_difference(x, y):
"""
Calculate percent difference of two values.
"""
if x == y == 0:
return 0
else:
return abs(x - y) / ((x + y) / 2)
def check_all_perc_diff_comb(series, perc_diff):
"""
Check series for pairs of values with percent difference above perc_diff.
Calculates the percent difference between all combinations of two values in
the passed series and checks if all of them are below the passed perc_diff.
Parameters
----------
series : pd.Series
Pandas series of values to check.
perc_diff : float
Percent difference threshold value as decimal i.e. 5% is 0.05.
Returns
-------
bool
"""
c = combinations(series.__iter__(), 2)
return all([perc_difference(x, y) < perc_diff for x, y in c])
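# Example (illustrative): are all pairwise percent differences below 5%?
#
#   >>> check_all_perc_diff_comb(pd.Series([800, 810, 805]), 0.05)
#   True
#   >>> check_all_perc_diff_comb(pd.Series([800, 810, 700]), 0.05)
#   False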
def sensor_filter(df, perc_diff):
"""
Check dataframe for rows with inconsistent values.
Applies check_all_perc_diff_comb function along rows of passed dataframe.
Parameters
----------
df : pandas DataFrame
perc_diff : float
Percent difference as decimal.
"""
if df.shape[1] >= 2:
bool_ser = df.apply(check_all_perc_diff_comb, perc_diff=perc_diff,
axis=1)
return df[bool_ser].index
elif df.shape[1] == 1:
return df.index
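# Example (illustrative; the sensor column names are assumptions): keep only the
# timestamps where redundant POA sensors agree to within 5 percent.
#
#   >>> poa_cols = df[['poa_sensor_1', 'poa_sensor_2']]
#   >>> good_ix = sensor_filter(poa_cols, 0.05)
#   >>> df_flt = df.loc[good_ix, :]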
def flt_irr(df, irr_col, low, high, ref_val=None):
"""
Top level filter on irradiance values.
Parameters
----------
df : DataFrame
Dataframe to be filtered.
irr_col : str
String that is the name of the column with the irradiance data.
low : float or int
Minimum value as fraction (0.8) or absolute 200 (W/m^2)
high : float or int
Max value as fraction (1.2) or absolute 800 (W/m^2)
ref_val : float or int
Must provide arg when min/max are fractions
Returns
-------
DataFrame
"""
if ref_val is not None:
low *= ref_val
high *= ref_val
df_renamed = df.rename(columns={irr_col: 'poa'})
flt_str = '@low <= ' + 'poa' + ' <= @high'
indx = df_renamed.query(flt_str).index
return df.loc[indx, :]
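# Examples (illustrative; `df` is an assumed dataframe with a 'GlobInc' column):
#
#   >>> flt_irr(df, 'GlobInc', 400, 800)               # absolute W/m^2 bounds
#   >>> flt_irr(df, 'GlobInc', 0.8, 1.2, ref_val=600)  # +/-20% around 600 W/m^2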
def filter_grps(grps, rcs, irr_col, low, high, **kwargs):
"""
    Apply irradiance filter around passed reporting irradiances to groupby.
For each group in the grps argument the irradiance is filtered by a
percentage around the reporting irradiance provided in rcs.
Parameters
----------
grps : pandas groupby
Groupby object with time groups (months, seasons, etc.).
rcs : pandas DataFrame
Dataframe of reporting conditions. Use the rep_cond method to generate
a dataframe for this argument.
**kwargs
Passed to pandas Grouper to control label and closed side of intervals.
        See pandas Grouper documentation for details. Default is left labeled
and left closed.
Returns
-------
pandas groupby
"""
flt_dfs = []
freq = list(grps.groups.keys())[0].freq
for grp_name, grp_df in grps:
ref_val = rcs.loc[grp_name, 'poa']
grp_df_flt = flt_irr(grp_df, irr_col, low, high, ref_val=ref_val)
flt_dfs.append(grp_df_flt)
df_flt = pd.concat(flt_dfs)
df_flt_grpby = df_flt.groupby(pd.Grouper(freq=freq, **kwargs))
return df_flt_grpby
def irrRC_balanced(df, low, high, irr_col='GlobInc', plot=False):
"""
Iteratively calculates reporting irradiance that achieves 40/60 balance.
    This function is intended to implement a strict interpretation of common
contract language that specifies the reporting irradiance be determined by
finding the irradiance that results in a balance of points within a
+/- percent range of the reporting irradiance. This function
iterates to a solution for the reporting irradiance by calculating the
    irradiance that has 10 datapoints in the filtered dataset above it, then
filtering for a percentage of points around that irradiance, calculating
what percentile the reporting irradiance is in. This procedure continues
until 40% of the points in the filtered dataset are above the calculated
reporting irradiance.
Parameters
----------
df: pandas DataFrame
DataFrame containing irradiance data for calculating the irradiance
reporting condition.
low: float
Bottom value for irradiance filter, usually between 0.5 and 0.8.
high: float
Top value for irradiance filter, usually between 1.2 and 1.5.
irr_col: str
String that is the name of the column with the irradiance data.
plot: bool, default False
        Plots graphical view of the algorithm searching for reporting irradiance.
Useful for troubleshooting or understanding the method.
Returns
-------
Tuple
Float reporting irradiance and filtered dataframe.
"""
if plot:
irr = df[irr_col].values
x = np.ones(irr.shape[0])
plt.plot(x, irr, 'o', markerfacecolor=(0.5, 0.7, 0.5, 0.1))
plt.ylabel('irr')
x_inc = 1.01
vals_above = 10
perc = 100.
pt_qty = 0
loop_cnt = 0
pt_qty_array = []
# print('--------------- MONTH START --------------')
while perc > 0.6 or pt_qty < 50:
# print('####### LOOP START #######')
df_count = df.shape[0]
df_perc = 1 - (vals_above / df_count)
# print('in percent: {}'.format(df_perc))
irr_RC = (df[irr_col].agg(perc_wrap(df_perc * 100)))
# print('ref irr: {}'.format(irr_RC))
flt_df = flt_irr(df, irr_col, low, high, ref_val=irr_RC)
# print('number of vals: {}'.format(df.shape))
pt_qty = flt_df.shape[0]
# print('flt pt qty: {}'.format(pt_qty))
perc = stats.percentileofscore(flt_df[irr_col], irr_RC) / 100
# print('out percent: {}'.format(perc))
vals_above += 1
pt_qty_array.append(pt_qty)
if perc <= 0.6 and pt_qty <= pt_qty_array[loop_cnt - 1]:
break
loop_cnt += 1
if plot:
x_inc += 0.02
y1 = irr_RC * low
y2 = irr_RC * high
plt.plot(x_inc, irr_RC, 'ro')
plt.plot([x_inc, x_inc], [y1, y2])
if plot:
plt.show()
return(irr_RC, flt_df)
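# Example (illustrative; `df` is an assumed dataframe of capacity-test data):
#
#   >>> irr_rc, df_bal = irrRC_balanced(df, 0.8, 1.2, irr_col='GlobInc')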
def fit_model(df, fml='power ~ poa + I(poa * poa) + I(poa * t_amb) + I(poa * w_vel) - 1'):
"""
Fits linear regression using statsmodels to dataframe passed.
Dataframe must be first argument for use with pandas groupby object
apply method.
Parameters
----------
df : pandas dataframe
fml : str
Formula to fit refer to statsmodels and patsy documentation for format.
Default is the formula in ASTM E2848.
Returns
-------
Statsmodels linear model regression results wrapper object.
"""
mod = smf.ols(formula=fml, data=df)
reg = mod.fit()
return reg
def predict(regs, rcs):
"""
Calculates predicted values for given linear models and predictor values.
Evaluates the first linear model in the iterable with the first row of the
predictor values in the dataframe. Passed arguments must be aligned.
Parameters
----------
regs : iterable of statsmodels regression results wrappers
rcs : pandas dataframe
Dataframe of predictor values used to evaluate each linear model.
        The column names must match the strings used in the regression formula.
Returns
-------
Pandas series of predicted values.
"""
    # Series.append was removed in pandas 2.0; collect predictions and concat instead.
    predictions = []
    for i, mod in enumerate(regs):
        RC_df = pd.DataFrame(rcs.iloc[i, :]).T
        predictions.append(mod.predict(RC_df))
    pred_cap = pd.concat(predictions)
    return pred_cap
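# Example (illustrative sketch): fit one ASTM E2848 regression per month and
# evaluate each model at that month's reporting conditions. `df` is assumed to
# have 'power', 'poa', 't_amb' and 'w_vel' columns with a DatetimeIndex, and
# `rcs` is assumed to hold one row of reporting conditions per month.
#
#   >>> grps = df.groupby(pd.Grouper(freq='M'))
#   >>> regs = grps.apply(fit_model)
#   >>> predicted = predict(regs, rcs)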
def pred_summary(grps, rcs, allowance, **kwargs):
"""
    Creates summary table of reporting conditions, predicted capacity, and guaranteed capacity.
This method does not calculate reporting conditions.
Parameters
----------
grps : pandas groupby object
Solar data grouped by season or month used to calculate reporting
conditions. This argument is used to fit models for each group.
rcs : pandas dataframe
Dataframe of reporting conditions used to predict capacities.
allowance : float
        Percent allowance to calculate guaranteed capacity from predicted capacity.
Returns
-------
    Dataframe of reporting conditions, model coefficients, predicted capacities,
    guaranteed capacities, and points in each grouping.
"""
regs = grps.apply(fit_model, **kwargs)
predictions = predict(regs, rcs)
params = regs.apply(lambda x: x.params.transpose())
pt_qty = grps.agg('count').iloc[:, 0]
predictions.index = pt_qty.index
params.index = pt_qty.index
rcs.index = pt_qty.index
predictions.name = 'PredCap'
for rc_col_name in rcs.columns:
for param_col_name in params.columns:
if rc_col_name == param_col_name:
params.rename(columns={param_col_name: param_col_name + '-param'},
inplace=True)
results = pd.concat([rcs, predictions, params], axis=1)
results['guaranteedCap'] = results['PredCap'] * (1 - allowance)
results['pt_qty'] = pt_qty.values
return results
def pvlib_location(loc):
"""
Creates a pvlib location object.
Parameters
----------
loc : dict
Dictionary of values required to instantiate a pvlib Location object.
loc = {'latitude': float,
'longitude': float,
'altitude': float/int,
'tz': str, int, float, or pytz.timezone, default 'UTC'}
See
http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
for a list of valid time zones.
pytz.timezone objects will be converted to strings.
ints and floats must be in hours from UTC.
Returns
-------
pvlib location object.
"""
return Location(**loc)
def pvlib_system(sys):
"""
Creates a pvlib PVSystem or SingleAxisTracker object.
A SingleAxisTracker object is created if any of the keyword arguments for
initiating a SingleAxisTracker object are found in the keys of the passed
dictionary.
Parameters
----------
sys : dict
Dictionary of keywords required to create a pvlib SingleAxisTracker
or PVSystem.
Example dictionaries:
fixed_sys = {'surface_tilt': 20,
'surface_azimuth': 180,
'albedo': 0.2}
tracker_sys1 = {'axis_tilt': 0, 'axis_azimuth': 0,
'max_angle': 90, 'backtrack': True,
'gcr': 0.2, 'albedo': 0.2}
Refer to pvlib documentation for details.
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.pvsystem.PVSystem.html
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.tracking.SingleAxisTracker.html
Returns
-------
pvlib PVSystem or SingleAxisTracker object.
"""
sandia_modules = retrieve_sam('SandiaMod')
cec_inverters = retrieve_sam('cecinverter')
sandia_module = sandia_modules['Canadian_Solar_CS5P_220M___2009_']
cec_inverter = cec_inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
trck_kwords = ['axis_tilt', 'axis_azimuth', 'max_angle', 'backtrack', 'gcr']
if any(kword in sys.keys() for kword in trck_kwords):
system = SingleAxisTracker(**sys,
module_parameters=sandia_module,
inverter_parameters=cec_inverter)
else:
system = PVSystem(**sys,
module_parameters=sandia_module,
inverter_parameters=cec_inverter)
return system
def get_tz_index(time_source, loc):
"""
Creates DatetimeIndex with timezone aligned with location dictionary.
    Handles generating a DatetimeIndex with a timezone for use as an argument
to pvlib ModelChain prepare_inputs method or pvlib Location get_clearsky
method.
Parameters
----------
time_source : dataframe or DatetimeIndex
If passing a dataframe the index of the dataframe will be used. If the
index does not have a timezone the timezone will be set using the
timezone in the passed loc dictionary.
If passing a DatetimeIndex with a timezone it will be returned directly.
If passing a DatetimeIndex without a timezone the timezone in the
        passed loc dictionary will be used.
Returns
-------
DatetimeIndex with timezone
"""
if isinstance(time_source, pd.core.indexes.datetimes.DatetimeIndex):
if time_source.tz is None:
time_source = time_source.tz_localize(loc['tz'], ambiguous='infer',
errors='coerce')
return time_source
else:
if pytz.timezone(loc['tz']) != time_source.tz:
warnings.warn('Passed a DatetimeIndex with a timezone that '
'does not match the timezone in the loc dict. '
'Using the timezone of the DatetimeIndex.')
return time_source
elif isinstance(time_source, pd.core.frame.DataFrame):
if time_source.index.tz is None:
return time_source.index.tz_localize(loc['tz'], ambiguous='infer',
errors='coerce')
else:
if pytz.timezone(loc['tz']) != time_source.index.tz:
warnings.warn('Passed a DataFrame with a timezone that '
'does not match the timezone in the loc dict. '
'Using the timezone of the DataFrame.')
return time_source.index
def csky(time_source, loc=None, sys=None, concat=True, output='both'):
"""
Calculate clear sky poa and ghi.
Parameters
----------
time_source : dataframe or DatetimeIndex
If passing a dataframe the index of the dataframe will be used. If the
index does not have a timezone the timezone will be set using the
timezone in the passed loc dictionary.
If passing a DatetimeIndex with a timezone it will be returned directly.
If passing a DatetimeIndex without a timezone the timezone in the
        passed loc dictionary will be used.
loc : dict
Dictionary of values required to instantiate a pvlib Location object.
loc = {'latitude': float,
'longitude': float,
'altitude': float/int,
'tz': str, int, float, or pytz.timezone, default 'UTC'}
See
http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
for a list of valid time zones.
pytz.timezone objects will be converted to strings.
ints and floats must be in hours from UTC.
sys : dict
Dictionary of keywords required to create a pvlib SingleAxisTracker
or PVSystem.
Example dictionaries:
fixed_sys = {'surface_tilt': 20,
'surface_azimuth': 180,
'albedo': 0.2}
tracker_sys1 = {'axis_tilt': 0, 'axis_azimuth': 0,
'max_angle': 90, 'backtrack': True,
'gcr': 0.2, 'albedo': 0.2}
Refer to pvlib documentation for details.
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.pvsystem.PVSystem.html
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.tracking.SingleAxisTracker.html
concat : bool, default True
If concat is True then returns columns as defined by return argument
added to passed dataframe, otherwise returns just clear sky data.
output : str, default 'both'
both - returns only total poa and ghi
poa_all - returns all components of poa
ghi_all - returns all components of ghi
all - returns all components of poa and ghi
"""
location = pvlib_location(loc)
system = pvlib_system(sys)
mc = ModelChain(system, location)
times = get_tz_index(time_source, loc)
if output == 'both':
ghi = location.get_clearsky(times=times)
mc.prepare_inputs(times=times)
csky_df = pd.DataFrame({'poa_mod_csky': mc.total_irrad['poa_global'],
'ghi_mod_csky': ghi['ghi']})
if output == 'poa_all':
mc.prepare_inputs(times=times)
csky_df = mc.total_irrad
if output == 'ghi_all':
csky_df = location.get_clearsky(times=times)
if output == 'all':
ghi = location.get_clearsky(times=times)
mc.prepare_inputs(times=times)
        csky_df = pd.concat([mc.total_irrad, ghi], axis=1)
    # Return logic reconstructed from the docstring above: join the clear sky
    # columns to the passed data when concat is True and a dataframe was given.
    if concat and isinstance(time_source, pd.core.frame.DataFrame):
        return pd.concat([time_source, csky_df], axis=1)
    else:
        return csky_df
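# Example (illustrative; `meas_df` and the dictionary values are assumptions --
# see the docstring above for the expected keys):
#
#   >>> loc = {'latitude': 39.7, 'longitude': -105.2, 'altitude': 1700, 'tz': 'US/Mountain'}
#   >>> sys = {'surface_tilt': 20, 'surface_azimuth': 180, 'albedo': 0.2}
#   >>> meas_with_csky = csky(meas_df, loc=loc, sys=sys, concat=True, output='both')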
import os
import pickle
from pathlib import Path
from typing import Union
import joblib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from numpy import interp
from sklearn.metrics import roc_curve, auc
import thoipapy.common
import thoipapy.figs
import thoipapy.utils
import thoipapy.validation
import thoipapy.validation.bocurve
from thoipapy.figs.create_BOcurve_files import save_BO_linegraph_and_barchart, save_extra_BO_figs
from thoipapy.validation.bocurve import parse_BO_data_csv_to_excel
from thoipapy.utils import get_test_and_train_set_lists
def run_testset_trainset_validation(s, logging):
# create list of test and train datasets
# if only one is given, make a list with only one dataset
test_set_list, train_set_list = thoipapy.utils.get_test_and_train_set_lists(s)
validate_LIPS_for_testset(s, logging)
validate_LIPS_for_testset(s, logging, LIPS_name="LIPS_surface_ranked", pred_col="LIPS_surface_ranked")
validate_THOIPA_for_testset_trainset_combination(s, test_set_list, train_set_list, logging)
def validate_THOIPA_for_testset_trainset_combination(s, test_set_list, train_set_list, logging):
""" Creates ROC and BO-curve for a particular testset-trainset combination.
Parameters
----------
s : dict
Settings dictionary for figures.
test_set_list : list
List of test datasets in selection
E.g. ["set03", "set31"]
train_set_list : list
List of training datasets in selection
E.g. ["set02", "set04"]
Saved Files
-----------
THOIPA_pred_csv : csv
THOIPA result for this testset-trainset combination
Columns = "residue_num", "residue_name", "THOIPA"
Index = range index of residues
combined_incl_THOIPA_csv : csv
The combined file with all features. THOIPA prediction is added as a new column
THOIPA_ROC_pkl : pickle
Pickled output dictionary with ROC curves
keys = accessions
values = dictionary with fpr, tpr etc for each protein
Could not be saved easily as a dataframe, because the number of residues is different for each protein
"""
names_excel_path = os.path.join(s["base_dir"], "protein_names.xlsx")
namedict = thoipapy.utils.create_namedict(names_excel_path)
for n, train_set in enumerate(train_set_list):
trainsetname = "set{:02d}".format(int(train_set))
model_pkl = Path(s["data_dir"]) / f"results/{trainsetname}/{trainsetname}_ML_model.lpkl"
for test_set in test_set_list:
testsetname = "set{:02d}".format(int(test_set))
# BO_curve_folder = Path(s["data_dir"]) / f"results{testsetname}/blindvalidation/thoipa.train{trainsetname}"
# THOIPA_BO_curve_data_csv = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "Test{}_Train{}.THOIPA.best_overlap_data.csv".format(testsetname, trainsetname))
THOIPA_BO_curve_data_csv = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/THOIPA.best_overlap_data.csv"
# THOIPA_ROC_pkl = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "Test{}_Train{}.THOIPA.ROC_data.pkl".format(testsetname, trainsetname))
THOIPA_ROC_pkl = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/ROC_data.pkl"
bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/bocurve_data.xlsx"
BO_linechart_png = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/BO_linechart.png"
BO_barchart_png = Path(s["data_dir"]) / f"results/{testsetname}/blindvalidation/thoipa.train{trainsetname}/AUBOC_barchart.png"
thoipapy.utils.make_sure_path_exists(bocurve_data_xlsx, isfile=True)
testset_path = thoipapy.common.get_path_of_protein_set(testsetname, s["sets_dir"])
            testdataset_df = pd.read_excel(testset_path)
import string
import matplotlib.pyplot as plt
from nltk.sentiment.vader import SentimentIntensityAnalyzer
print('Libraries imported.')
import pandas as pd
# Read the JSON file into a pandas DataFrame.
path = input("Enter the file Path:")
dataset = pd.read_json(path)
from piper.custom import ratio
import datetime
import numpy as np
import pandas as pd
import pytest
from time import strptime
from piper.custom import add_xl_formula
from piper.factory import sample_data
from piper.factory import generate_periods, make_null_dates
from piper.custom import from_julian
from piper.custom import fiscal_year
from piper.custom import from_excel
from piper.custom import to_julian
from piper.verbs import across
# t_sample_data {{{1
@pytest.fixture
def t_sample_data():
return sample_data()
# test_add_xl_formula {{{1
def test_add_xl_formula(t_sample_data):
df = t_sample_data
formula = '=CONCATENATE(A{row}, B{row}, C{row})'
add_xl_formula(df, column_name='X7', formula=formula)
expected = (367, )
assert expected == df.X7.shape
# test_across_str_date_single_col_pd_to_datetime {{{1
def test_across_str_date_single_col_pd_to_datetime():
''' '''
test = ['30/11/2019', '29/4/2019', '30/2/2019', '28/2/2019', '2019/4/30']
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', pd.to_datetime, format='%d/%m/%Y', errors='coerce')
assert exp.equals(got) == True
# test_across_str_date_single_col_lambda {{{1
def test_across_str_date_single_col_lambda():
''' '''
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', convert_date)
assert exp.equals(got) == True
# test_across_raise_column_parm_none_ValueError {{{1
def test_across_raise_column_parm_none():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, columns=None, function=convert_date)
assert exp.equals(got) == True
# test_across_raise_function_parm_none_ValueError {{{1
def test_across_raise_function_parm_none_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='dates', function=None)
# test_across_raise_Series_parm_TypeError {{{1
def test_across_raise_Series_parm_TypeError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(TypeError):
got = across(pd.Series(test), columns='dates', function=convert_date)
# test_across_raise_column_parm_ValueError {{{1
def test_across_raise_column_parm_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
    got = pd.DataFrame(test, columns=['dates'])
    # Reconstructed to mirror the preceding tests: an unknown column name is
    # expected to raise a ValueError.
    with pytest.raises(ValueError):
        across(got, columns='not_a_column', function=convert_date)
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_gui.ipynb (unless otherwise specified).
__all__ = ['STYLE', 'INITIAL_WIDGET_PARAMS', 'Gui', 'Select_stats_widget', 'Select_plots_widget',
'Select_downloads_widget', 'Customization_widget', 'Customize_annotations', 'Select_annotations',
'Customize_y_axis', 'Customize_x_axis', 'Customize_both_axes', 'Customize_other_features', 'launch']
# Cell
from dcl_stats_n_plots import stats
from dcl_stats_n_plots import plots
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pingouin as pg
import itertools
import math
import ipywidgets as w
from IPython.display import display
# Cell
STYLE = {'description_width': 'initial'}
# Cell
# Initial params:
INITIAL_WIDGET_PARAMS = {'uploader': {'visibility': 'visible'},
'stats_button': {'visibility': 'visible'},
'plots_button': {'visibility': 'hidden',
'description': 'Plot the data'},
'downloads_button': {'visibility': 'hidden'},
'stats_dropdown': {'options': [('Pairwise comparison of two or more independent samples', 0),
('Comparison of one group against a fixed value (one-sample test)', 1),
('Mixed_model_ANOVA', 2)],
'visibility': 'visible',
'value': 0},
'plots_dropdown': {'options': [('something initial', 0)],
'visibility': 'hidden',
'value': 0},
'downloads_dropdown': {'visibility': 'hidden',
'value': 2},
'customization_accordion': {'visibility': 'hidden'},
'xlabel_order': {'visibility': 'hidden',
'value': ' '},
'hue_order': {'visibility': 'hidden',
'value': ' '},
'group_colors_vbox': {'children': ()}}
# Cell
class Gui:
"""
Top-level architecture of the GUI
"""
def __init__(self):
self.params = self.set_initial_params()
# Widgets, Output, and Layout
self.uploader = w.FileUpload(accept=('.xlsx, .csv'), multiple=False)
self.stats_selection = Select_stats_widget(self.params)
self.plots_selection = Select_plots_widget(self.params)
self.customization = Customization_widget()
self.downloads_selection = Select_downloads_widget(self.params)
self.out = w.Output()
self.widget = w.VBox([self.uploader,
self.stats_selection.widget,
self.plots_selection.widget,
self.customization.widget,
self.downloads_selection.widget,
self.out])
# Link buttons
self.stats_selection.button.on_click(self.on_stats_button_clicked)
self.plots_selection.button.on_click(self.on_plots_button_clicked)
self.downloads_selection.button.on_click(self.on_downloads_button_clicked)
## Methods to initialize or update the params, or to update the widgets accordingly
# Initialzie params
def set_initial_params(self):
params = {'data': None, # will be updated when data is uploaded
'results': None, # will be updated when statistics are computed
'save_plot': False,
'widgets': INITIAL_WIDGET_PARAMS}
return params
# Update params
def get_updated_params(self):
# Dropdowns
self.params['widgets']['stats_dropdown']['value'] = self.stats_selection.dropdown.value
self.params['widgets']['plots_dropdown']['value'] = self.plots_selection.dropdown.value
self.params['widgets']['downloads_dropdown']['value'] = self.downloads_selection.dropdown.value
# Customization
self.params['fig_width'] = self.customization.other_features.set_fig_width.value
self.params['fig_height'] = self.customization.other_features.set_fig_height.value
self.params['marker_size'] = self.customization.other_features.set_marker_size.value
self.params['show_legend'] = self.customization.other_features.set_show_legend.value
self.params['axes_linewidth'] = self.customization.both_axes.set_axes_linewidth.value
self.params['axes_color'] = self.customization.both_axes.set_axes_color.value
self.params['axes_tick_size'] = self.customization.both_axes.set_axes_tick_size.value
self.params['yaxis_label_text'] = self.customization.yaxis.set_yaxis_label_text.value
self.params['yaxis_label_fontsize'] = self.customization.yaxis.set_yaxis_label_fontsize.value
self.params['yaxis_label_color'] = self.customization.yaxis.set_yaxis_label_color.value
self.params['yaxis_scaling_mode'] = self.customization.yaxis.set_yaxis_scaling_mode.value
self.params['yaxis_lower_lim_value'] = self.customization.yaxis.set_yaxis_lower_lim.value
self.params['yaxis_upper_lim_value'] = self.customization.yaxis.set_yaxis_upper_lim.value
self.params['xaxis_label_color'] = self.customization.xaxis.set_xaxis_label_color.value
self.params['xaxis_label_fontsize'] = self.customization.xaxis.set_xaxis_label_fontsize.value
self.params['xaxis_label_text'] = self.customization.xaxis.set_xaxis_label_text.value
self.params['annotate_all'] = self.customization.select_annotations.set_annotate_all.value
self.params['distance_stars_to_brackets'] = self.customization.customize_annotations.set_distance_stars_to_brackets.value
self.params['distance_brackets_to_data'] = self.customization.customize_annotations.set_distance_brackets_to_data.value
self.params['fontsize_stars'] = self.customization.customize_annotations.set_fontsize_stars.value
self.params['linewidth_annotations'] = self.customization.customize_annotations.set_linewidth_annotations.value
if self.customization.customize_annotations.select_bracket_no_bracket.value == 'brackets':
self.params['annotation_brackets_factor'] = 1
else:
self.params['annotation_brackets_factor'] = 0
if self.customization.customize_annotations.set_stars_fontweight_bold.value == True:
self.params['fontweight_stars'] = 'bold'
else:
self.params['fontweight_stars'] = 'normal'
if self.customization.other_features.select_palette_or_individual_color.value == 0:
self.params['color_palette'] = self.customization.other_features.select_color_palettes.value
else:
color_palette = {}
for group_id in self.params['l_groups']:
color_palette[group_id] = self.customization.other_features.group_colors_vbox.children[self.params['l_groups'].index(group_id)].value
self.params['color_palette'] = color_palette
l_xlabel_order = []
l_xlabel_string = self.customization.xaxis.set_xlabel_order.value
while ', ' in l_xlabel_string:
l_xlabel_order.append(l_xlabel_string[:l_xlabel_string.index(', ')])
l_xlabel_string = l_xlabel_string[l_xlabel_string.index(', ')+2:]
l_xlabel_order.append(l_xlabel_string)
self.params['l_xlabel_order'] = l_xlabel_order
l_hue_order = []
l_hue_string = self.customization.xaxis.set_hue_order.value
while ', ' in l_hue_string:
l_hue_order.append(l_hue_string[:l_hue_string.index(', ')])
l_hue_string = l_hue_string[l_hue_string.index(', ')+2:]
l_hue_order.append(l_hue_string)
self.params['l_hue_order'] = l_hue_order
# Update widgets according to params
def set_updated_params(self):
# Buttons
self.stats_selection.button.layout.visibility = self.params['widgets']['stats_button']['visibility']
self.plots_selection.button.layout.visibility = self.params['widgets']['plots_button']['visibility']
self.plots_selection.button.description = self.params['widgets']['plots_button']['description']
self.downloads_selection.button.layout.visibility = self.params['widgets']['downloads_button']['visibility']
self.uploader.layout.visibility = self.params['widgets']['uploader']['visibility']
# Dropdowns
self.plots_selection.dropdown.layout.visibility = self.params['widgets']['plots_dropdown']['visibility']
self.plots_selection.dropdown.options = self.params['widgets']['plots_dropdown']['options']
self.downloads_selection.dropdown.layout.visibility = self.params['widgets']['downloads_dropdown']['visibility']
# Customization
self.customization.widget.layout.visibility = self.params['widgets']['customization_accordion']['visibility']
## Customize annotations
if len(self.customization.select_annotations.select_annotations_vbox.children) == 0:
self.customization.select_annotations.select_annotations_vbox.children = self.customization.select_annotations.select_annotations_vbox.children + self.params['checkboxes_to_add']
## Customize axes
### x-axis
self.customization.xaxis.set_xlabel_order.value = self.params['widgets']['xlabel_order']['value']
self.customization.xaxis.set_xlabel_order.layout.visibility = self.params['widgets']['xlabel_order']['visibility']
self.customization.xaxis.set_hue_order.value = self.params['widgets']['hue_order']['value']
self.customization.xaxis.set_hue_order.layout.visibility = self.params['widgets']['hue_order']['visibility']
### y-axis
self.customization.yaxis.set_yaxis_lower_lim.value = self.params['yaxis_lower_lim_value']
self.customization.yaxis.set_yaxis_upper_lim.value = self.params['yaxis_upper_lim_value']
## Customize other features
if len(self.customization.other_features.group_colors_vbox.children) == 0:
self.customization.other_features.group_colors_vbox.children = self.params['widgets']['group_colors_vbox']['children']
## Methods to define button functions
# Stats button
def on_stats_button_clicked(self, b):
self.get_updated_params()
# Read the data that was selected using the uploader:
if list(self.uploader.value.keys())[0].endswith('.csv'):
with open("input.csv", "w+b") as i:
i.write(self.uploader.value[list(self.uploader.value.keys())[0]]['content'])
df = pd.read_csv('input.csv')
elif list(self.uploader.value.keys())[0].endswith('.xlsx'):
with open("input.xlsx", "w+b") as i:
i.write(self.uploader.value[list(self.uploader.value.keys())[0]]['content'])
df = pd.read_excel('input.xlsx')
self.params['data'] = df
with self.out:
self.out.clear_output()
# This will create & display whatever is defined as output and allow the bound on_click function to return params
self.params = self.stats_selection.on_button_clicked(self.params)
display(self.params['results']['summary']['pairwise_comparisons'])
# Finally, update all widgets according to the newly specified params:
self.set_updated_params()
# Plots button
def on_plots_button_clicked(self, b):
self.get_updated_params()
with self.out:
self.out.clear_output()
self.params = self.plots_selection.on_button_clicked(self.params)
# Finally, update all widgets according to the newly specified params:
self.set_updated_params()
# Downloads button
def on_downloads_button_clicked(self, b):
self.get_updated_params()
with self.out:
self.out.clear_output()
self.downloads_selection.on_button_clicked(self.params)
if self.downloads_selection.dropdown.value in [1, 2]:
self.params['save_plot'] = True
self.params = self.plots_selection.on_button_clicked(self.params)
self.params['save_plot'] = False
# Finally, update all widgets according to the newly specified params:
self.set_updated_params()
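# Example (illustrative): in a Jupyter notebook the GUI is typically created and
# shown like this (the module's `launch` helper listed in __all__ presumably
# wraps these two lines):
#
#   >>> gui = Gui()
#   >>> display(gui.widget)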
# Cell
class Select_stats_widget:
"Creates the part of the widget that allows to select what statistical comparison shall be made"
def __init__(self, params):
self.dropdown = w.Dropdown(description = 'Please select which test you want to perform:',
options = params['widgets']['stats_dropdown']['options'],
value = params['widgets']['stats_dropdown']['value'],
layout = {'width': '100%',
'visibility': params['widgets']['stats_dropdown']['visibility']},
style = STYLE)
self.button = w.Button(description = "Calculate stats", icon = 'rocket', layout = {'visibility': params['widgets']['stats_button']['visibility']})
self.widget = w.HBox([self.dropdown, self.button])
def on_button_clicked(self, params):
stats_value = params['widgets']['stats_dropdown']['value']
df = params['data']
# Update params values
params['widgets']['uploader']['visibility'] = 'hidden'
params['widgets']['plots_button']['visibility'] = 'visible'
params['widgets']['plots_dropdown']['visibility'] = 'visible'
params['widgets']['downloads_button']['visibility'] = 'visible'
params['widgets']['downloads_dropdown']['visibility'] = 'visible'
params['widgets']['customization_accordion']['visibility'] = 'visible'
if stats_value == 0: # comparison of independent samples
params['widgets']['plots_dropdown']['options'] = [('stripplot', 0),
('boxplot', 1),
('boxplot with stripplot overlay', 2),
('violinplot', 3),
('violinplot with stripplot overlay', 4)]
elif stats_value == 1: # one-sample test:
            params['widgets']['plots_dropdown']['options'] = [('stripplot', 0),
('boxplot', 1),
('boxplot with stripplot overlay', 2),
('violinplot', 3),
('violinplot with stripplot overlay', 4),
('histogram', 5)]
elif stats_value == 2: # mixed-model ANOVA
params['widgets']['plots_dropdown']['options'] = [('pointplot', 0),
('boxplot', 1),
('boxplot with stripplot overlay', 2),
('violinplot', 3),
('violinplot with stripplot overlay', 4)]
else:
print('Function not implemented. Please go and annoy Dennis to finally do it')
if stats_value == 0:
results = stats.independent_samples(df)
params['data_col'], params['group_col'], params['l_groups'] = results['data_col'], results['group_col'], results['l_groups']
params['results'], params['performed_test'] = results, results['performed_test']
params = self.create_checkboxes_pairwise_comparisons(params)
elif stats_value == 1:
results = stats.one_sample(df)
params['data_col'], params['group_col'], params['l_groups'] = results['data_col'], results['group_col'], results['l_groups']
params['fixed_val_col'], params['fixed_value'] = results['fixed_val_col'], results['fixed_value']
params['results'], params['performed_test'] = results, results['performed_test']
params = self.create_checkboxes_pairwise_comparisons(params)
elif stats_value == 2:
results = stats.mixed_model_ANOVA(df)
params['data_col'], params['group_col'], params['l_groups'] = results['data_col'], results['group_col'], results['l_groups']
params['subject_col'], params['session_col'], params['l_sessions'] = results['subject_col'], results['session_col'], results['l_sessions']
params['results'], params['performed_test'] = results, results['performed_test']
params = self.create_checkboxes_pairwise_comparisons_mma(params)
params = self.create_group_order_text(params, stats_value)
params = self.create_ylims(params, df, params['data_col'])
params = self.create_group_color_pickers(params, params['l_groups'])
return params
def create_checkboxes_pairwise_comparisons(self, params):
l_groups = params['l_groups']
if len(l_groups) == 1:
fixed_val_col = params['fixed_val_col']
l_checkboxes = [w.Checkbox(value=False,description='{} vs. {}'.format(l_groups[0], fixed_val_col))]
else:
# Create a checkbox for each pairwise comparison
l_checkboxes = [w.Checkbox(value=False,description='{} vs. {}'.format(group1, group2))
for group1, group2 in list(itertools.combinations(l_groups, 2))]
# Arrange checkboxes in a HBoxes with up to 3 checkboxes per HBox
l_HBoxes = []
elem = 0
for i in range(int(len(l_checkboxes)/3)):
l_HBoxes.append(w.HBox(l_checkboxes[elem:elem+3]))
elem = elem + 3
if len(l_checkboxes) % 3 != 0:
l_HBoxes.append(w.HBox(l_checkboxes[elem:]))
# Arrange HBoxes in a VBox and select all as tuple to later place in empty placeholder (select_annotations_vbox)
params['checkboxes_to_add'] = w.VBox(l_HBoxes).children[:]
params['l_checkboxes'] = l_checkboxes
return params
def create_checkboxes_pairwise_comparisons_mma(self, params):
l_sessions = params['l_sessions']
annotate_session_stats_accordion = w.Accordion(children=[], selected_index=None)
l_all_checkboxes = []
for session_id in l_sessions:
params = self.create_checkboxes_pairwise_comparisons(params)
# Little complicated, but neccessary since the output of create_checkboxes_pairwise_comparisons() is a tuple
checkboxes_to_add_temp_vbox = w.VBox([])
checkboxes_to_add_temp_vbox.children = checkboxes_to_add_temp_vbox.children + params['checkboxes_to_add']
annotate_session_stats_accordion.children = annotate_session_stats_accordion.children + (checkboxes_to_add_temp_vbox, )
l_all_checkboxes = l_all_checkboxes + [(session_id, elem) for elem in params['l_checkboxes']]
for i in range(len(list(annotate_session_stats_accordion.children))):
annotate_session_stats_accordion.set_title(i, l_sessions[i])
params['checkboxes_to_add'] = w.VBox([annotate_session_stats_accordion]).children[:]
params['l_checkboxes'] = l_all_checkboxes
return params
def create_group_order_text(self, params, stats_value):
l_groups = params['l_groups']
if stats_value == 0:
for group_id in l_groups:
if l_groups.index(group_id) == 0:
l_xlabel_string = group_id
else:
l_xlabel_string = l_xlabel_string + ', {}'.format(group_id)
params['widgets']['xlabel_order']['value'] = l_xlabel_string
params['widgets']['xlabel_order']['visibility'] = 'visible'
elif stats_value == 1:
params['widgets']['xlabel_order']['value'] = l_groups[0]
elif stats_value == 2:
l_sessions = params['l_sessions']
for session_id in l_sessions:
if l_sessions.index(session_id) == 0:
l_xlabel_string = session_id
else:
l_xlabel_string = l_xlabel_string + ', {}'.format(session_id)
params['widgets']['xlabel_order']['value'] = l_xlabel_string
params['widgets']['xlabel_order']['visibility'] = 'visible'
for group_id in l_groups:
if l_groups.index(group_id) == 0:
l_hue_string = group_id
else:
l_hue_string = l_hue_string + ', {}'.format(group_id)
params['widgets']['hue_order']['value'] = l_hue_string
params['widgets']['hue_order']['visibility'] = 'visible'
return params
def create_ylims(self, params, df, data_col):
if df[data_col].min() < 0:
params['yaxis_lower_lim_value'] = round(df[data_col].min() + df[data_col].min()*0.1, 2)
else:
params['yaxis_lower_lim_value'] = round(df[data_col].min() - df[data_col].min()*0.1, 2)
if df[data_col].max() < 0:
params['yaxis_upper_lim_value'] = round(df[data_col].max() - df[data_col].max()*0.1, 2)
else:
params['yaxis_upper_lim_value'] = round(df[data_col].max() + df[data_col].max()*0.1, 2)
return params
def create_group_color_pickers(self, params, l_groups):
if len(params['widgets']['group_colors_vbox']['children']) == 0:
for group_id in l_groups:
set_group_color = w.ColorPicker(concise = False, description = group_id, style = STYLE)
params['widgets']['group_colors_vbox']['children'] = params['widgets']['group_colors_vbox']['children'] + (set_group_color, )
return params
# Cell
class Select_plots_widget:
"Creates the part of the widget that allows to select what statistical comparison shall be made"
def __init__(self, params):
self.dropdown = w.Dropdown(description = 'Please select which type of plot you want to create:',
options = params['widgets']['plots_dropdown']['options'],
value = params['widgets']['plots_dropdown']['value'],
layout = {'width': '100%',
'visibility': params['widgets']['plots_dropdown']['visibility']},
style = STYLE)
self.button = w.Button(description = "Plot the data", layout = {'visibility': params['widgets']['plots_button']['visibility']})
self.widget = w.HBox([self.dropdown, self.button])
def on_button_clicked(self, params):
stats_value = params['widgets']['stats_dropdown']['value']
plots_value = params['widgets']['plots_dropdown']['value']
df = params['data']
params['widgets']['plots_button']['description'] = 'Refresh the plot'
if stats_value == 0: # independent_samples()
params = self.get_l_stats_to_annotate_independent_samples(params)
if plots_value == 0:
plots.plot_independent_samples(df, plot_type = 'stripplot', **params)
#plots.annotate_stats_independent_samples(ax, df, params)
elif plots_value == 1:
plots.plot_independent_samples(df, plot_type = 'boxplot', **params)
elif plots_value == 2:
plots.plot_independent_samples(df, plot_type = 'boxplot with stripplot overlay', **params)
elif plots_value == 3:
plots.plot_independent_samples(df, plot_type = 'violinplot', **params)
elif plots_value == 4:
plots.plot_independent_samples(df, plot_type = 'violinplot with stripplot overlay', **params)
else:
print("Function not implemented. Please go and annoy Dennis to finally do it")
elif stats_value == 1: # one_sample()
params = self.get_l_stats_to_annotate_independent_samples(params)
if plots_value == 0:
plots.plot_one_sample(df, plot_type = 'stripplot', **params)
elif plots_value == 1:
plots.plot_one_sample(df, plot_type = 'boxplot', **params)
elif plots_value == 2:
plots.plot_one_sample(df, plot_type = 'boxplot with stripplot overlay', **params)
elif plots_value == 3:
plots.plot_one_sample(df, plot_type = 'violinplot', **params)
elif plots_value == 4:
plots.plot_one_sample(df, plot_type = 'violinplot with stripplot overlay', **params)
elif plots_value == 5:
plots.plot_one_sample(df, plot_type = 'histogram', **params)
else:
print("Function not implemented. Please go and annoy Dennis to finally do it")
elif stats_value == 2: # MMA
params = self.get_l_stats_to_annotate_mma(params)
if plots_value == 0:
plots.plot_mma(df, plot_type = 'pointplot', **params)
elif plots_value == 1:
plots.plot_mma(df, plot_type = 'boxplot', **params)
elif plots_value == 2:
plots.plot_mma(df, plot_type = 'boxplot with stripplot overlay', **params)
elif plots_value == 3:
plots.plot_mma(df, plot_type = 'violinplot', **params)
elif plots_value == 4:
plots.plot_mma(df, plot_type = 'violinplot with stripplot overlay', **params)
else:
print("Function not implemented. Please go and annoy Dennis to finally do it")
else:
print("Function not implemented. Please go and annoy Dennis to finally do it")
return params
def get_l_stats_to_annotate_independent_samples(self, params):
l_checkboxes = params['l_checkboxes']
l_stats_to_annotate = []
if params['annotate_all']==True:
for i in range(len(l_checkboxes)):
l_checkboxes[i].value = True
for i in range(len(l_checkboxes)):
if l_checkboxes[i].value:
checkbox_description = l_checkboxes[i].description
group1 = checkbox_description[:checkbox_description.index(' ')]
group2 = checkbox_description[checkbox_description.index(' vs. ') + 5 :]
l_stats_to_annotate.append((group1, group2))
params['l_stats_to_annotate'] = l_stats_to_annotate
return params
def get_l_stats_to_annotate_mma(self, params):
l_checkboxes = params['l_checkboxes']
l_stats_to_annotate = []
if params['annotate_all']==True:
for i in range(len(l_checkboxes)):
l_checkboxes[i][1].value = True
for i in range(len(l_checkboxes)):
if l_checkboxes[i][1].value:
checkbox_description = l_checkboxes[i][1].description
group1 = checkbox_description[:checkbox_description.index(' ')]
group2 = checkbox_description[checkbox_description.index(' vs. ') + 5 :]
session_id = l_checkboxes[i][0]
l_stats_to_annotate.append((group1, group2, session_id))
params['l_stats_to_annotate'] = l_stats_to_annotate
return params
# Cell
class Select_downloads_widget:
"Creates the part of the widget that allows the user to download the results"
def __init__(self, params):
self.dropdown = w.Dropdown(description = 'Please select what output you would like to download:',
options = [('statistical results only', 0), ('plot only', 1), ('both', 2)],
value = params['widgets']['downloads_dropdown']['value'],
layout = {'width': '100%',
'visibility': params['widgets']['downloads_dropdown']['visibility']},
style = STYLE)
self.button = w.Button(description='Download', icon='file-download', layout = {'visibility': params['widgets']['downloads_button']['visibility']})
self.widget = w.HBox([self.dropdown, self.button])
def on_button_clicked(self, params):
downloads_value = params['widgets']['downloads_dropdown']['value']
stats_value = params['widgets']['stats_dropdown']['value']
if downloads_value == 0 or downloads_value == 2:
if stats_value == 0:
df_individual_group_stats = self.get_individual_group_stats_for_download(False, params)
df_group_level_overview = self.get_group_level_stats_for_download(params)
df_pairwise_comparisons = params['results']['summary']['pairwise_comparisons'].copy()
elif stats_value == 1:
df_individual_group_stats = self.get_individual_group_stats_for_download(False, params)
df_pairwise_comparisons = params['results']['summary']['pairwise_comparisons'].copy()
elif stats_value == 2:
df_individual_group_stats = self.get_individual_group_stats_for_download(True, params)
df_group_level_overview = self.get_group_level_stats_for_download(params)
df_pairwise_comparisons = params['results']['summary']['pairwise_comparisons'].copy()
with pd.ExcelWriter('statistic_results.xlsx') as writer:
df_individual_group_stats.to_excel(writer, sheet_name='Individual group statistics')
if stats_value in [0, 2]:
df_group_level_overview.to_excel(writer, sheet_name='Whole-group statistics')
df_pairwise_comparisons.to_excel(writer, sheet_name='Pairwise comparisons')
def calculate_individual_group_stats(self, d, key, params):
group_data = params['results'][key]['data']
d['means'].append(np.mean(group_data))
d['medians'].append(np.median(group_data))
d['stddevs'].append(np.std(group_data))
d['stderrs'].append(np.std(group_data) / math.sqrt(group_data.shape[0]))
d['tests'].append('Shapiro-Wilk')
d['test_stats'].append(params['results'][key]['normality_full'].iloc[0,0])
d['pvals'].append(params['results'][key]['normality_full'].iloc[0,1])
d['bools'].append(params['results'][key]['normality_full'].iloc[0,2])
return d
def get_individual_group_stats_for_download(self, include_sessions, params):
d_individual_group_stats = {'means': [],
'medians': [],
'stddevs': [],
'stderrs': [],
'tests': [],
'test_stats': [],
'pvals': [],
'bools': []}
l_for_index = []
if include_sessions == False:
# for independent samples & one sample:
for group_id in params['l_groups']:
d_individual_group_stats = self.calculate_individual_group_stats(d_individual_group_stats, group_id, params)
l_for_index.append(group_id)
l_index = l_for_index
else:
# for mma:
for group_id in params['l_groups']:
for session_id in params['l_sessions']:
d_individual_group_stats = self.calculate_individual_group_stats(d_individual_group_stats, (group_id, session_id), params)
l_for_index.append((group_id, session_id))
l_index = pd.MultiIndex.from_tuples(l_for_index)
df_individual_group_stats = pd.DataFrame(data=d_individual_group_stats)
multi_index_columns = pd.MultiIndex.from_tuples([('Group statistics', 'Mean'), ('Group statistics', 'Median'), ('Group statistics', 'Standard deviation'), ('Group statistics', 'Standard error'),
('Test for normal distribution', 'Test'), ('Test for normal distribution', 'Test statistic'), ('Test for normal distribution', 'p-value'),
('Test for normal distribution', 'Normally distributed?')])
df_individual_group_stats.columns = multi_index_columns
df_individual_group_stats.index = l_index
return df_individual_group_stats
def get_group_level_stats_for_download(self, params):
df_group_level_overview = pg.homoscedasticity([params['results'][key]['data'] for key in params['results'].keys() if (type(params['results'][key]) == dict) & (key != 'summary')])
df_group_level_overview.index = [0]
        df_group_level_overview.columns = pd.MultiIndex.from_tuples([('Levene', 'W statistic'), ('Levene', 'p value'), ('Levene', 'Equal variances?')])
import csv
import os
import numpy as np
import pandas as pd
# Loads the episode lengths from the csv files and returns them as a numpy array (one entry per run)
def load_data(algpath):
Data = []
dirFiles = os.listdir(algpath)
Files = np.array([i for i in dirFiles if 'episodes' in i])
for fileIndex in range(len(Files)):
List = pd.read_csv(algpath+'/'+Files[fileIndex])
Data.append(List['episode lengths'])
return np.array(Data)
# Converts episode lengths into failure timesteps
def convert_data(alg, Data):
convertedData = []
for run in range(len(Data)):
episodeLengthsData = Data[run].to_numpy()
failureTimesteps = np.cumsum(episodeLengthsData)
totalTimesteps = failureTimesteps[-1]
        # If the last recorded episode length is non-zero, the run ended without a final failure, so drop the last cumulative timestep
if episodeLengthsData[-1] != 0.0:
failureTimesteps = failureTimesteps[:-1]
failureTimesteps_DataFrame = pd.DataFrame({'failures': failureTimesteps})
convertedData.append(failureTimesteps_DataFrame)
return convertedData, totalTimesteps
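# Worked example of the conversion above (toy episode lengths assumed):
#     >>> np.cumsum(np.array([3.0, 5.0, 2.0]))
#     array([ 3.,  8., 10.])
# i.e. failures occurred at timesteps 3, 8 and 10 out of totalTimesteps = 10;
# because the last episode length is non-zero, the final entry (10) is dropped,
# leaving failure timesteps [3, 8].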
# Transforms the failure timesteps into 'Rewards', 'Returns', 'Failures', or 'Average-Rewards'
def transform_data(alg, failureTimesteps, totalTimesteps, transformation='Rewards', window=0):
transformedData = []
for run in range(len(failureTimesteps)):
if run % 10 == 0:
print(run, alg)
# Calculate rewards from failure timesteps
indexing = (failureTimesteps[run] - 1).to_numpy().flatten()
rewardsList = np.zeros(totalTimesteps)
rewardsList[indexing] = -1.0
# Keep the data to rewards
if transformation == 'Rewards':
tempData = pd.DataFrame({'rewards': rewardsList})
# Returns are equal to sum of rewards
elif transformation == 'Returns':
returnsList = np.cumsum(rewardsList)
tempData = pd.DataFrame({'returns': returnsList})
# Failures are equal to negative returns
elif transformation == 'Failures':
returnsList = np.cumsum(rewardsList)
failuresList = -1 * returnsList
            tempData = pd.DataFrame({'cummulativeFailures': failuresList})
# -*- coding: utf-8 -*-
# Load library
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import numpy as np
import calendar
from datetime import date
def day_of_week(my_date):
return calendar.day_name[my_date.weekday()]
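# Quick sanity check of the helper above (uses the `date` class imported earlier):
#     >>> day_of_week(date(2021, 1, 1))
#     'Friday'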
#Read data from csv file
dataframe = pd.read_csv("MedicalAppointmentNoShows.csv")
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 12:07:48 2018
@author: liukang
"""
from pymare import meta_regression
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import warnings
from sklearn.feature_selection import mutual_info_regression, VarianceThreshold, SelectKBest
warnings.filterwarnings('ignore')
#result_total=pd.DataFrame()
#target subgroup
feature_list=pd.read_csv('/home/liukang/Doc/Meta_regression/age_and_lab5+10.csv')
beta_record = pd.DataFrame()
var_record = pd.DataFrame()
target_record = pd.DataFrame()
for data_num in range(1,6):
beta_data = pd.read_csv("/home/liukang/Doc/Meta_regression/Beta_all_{}.csv".format(data_num))
    beta_record = pd.concat([beta_record,beta_data])
import argparse
import sys
import os
import random
import time
import re
import copy
import pickle
import pandas as pd
import numpy as np
import gc
import torch
from torch import nn, optim, cuda
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler, \
TensorDataset, WeightedRandomSampler
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, precision_recall_fscore_support, roc_auc_score
from sklearn.model_selection import KFold
# from tqdm import tqdm
from renet2.raw import load_documents
from renet2.raw_handler import *
from renet2.model import *
def free_cuda():
gc.collect()
with torch.cuda.device('cuda'):
torch.cuda.empty_cache()
def set_t(df):
df['pmid'] = df['pmid'].astype(str)
df['geneId'] = df['geneId'].astype(str)
df['diseaseId'] = df['diseaseId'].astype(str)
return df
def get_ann_dataset(features_ft_sub, abs_s_df):
# filtering al non annotated GDA
tmp_ft_df = features_ft_sub[1].copy()
tmp_ft_df['index1'] = tmp_ft_df.index
# print(len(tmp_ft_df))
tmp_ft_df = tmp_ft_df.merge(abs_s_df, on=['pmid', 'geneId', 'diseaseId'], how='right')
tmp_ft_df = tmp_ft_df.dropna(subset=['index1'])
    _index = pd.Int64Index(tmp_ft_df['index1'].values, dtype=np.int64)
import os
import sys
import pandas as pd
import numpy as np
import keras
import warnings
warnings.filterwarnings("ignore")
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path2 = os.path.abspath(os.path.join(file_path, "..", "..", "common"))
sys.path.append(lib_path2)
import candle
# download all the data if needed from the repo
data_url = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/uno/Candle_Milestone_16_Version_12_15_2019/Data/Data_For_Testing/'
file_name = 'small_drug_descriptor_data_unique_samples.txt'
drug_descriptor = candle.get_file(file_name, data_url+file_name, cache_subdir='examples')
file_name = 'small_drug_response_data.txt'
response_data = candle.get_file(file_name, data_url+file_name, cache_subdir='examples')
file_name = 'Gene_Expression_Full_Data_Unique_Samples.txt'
gene_expression = candle.get_file(file_name, data_url+file_name, cache_subdir='examples')
file_name = 'CCLE_NCI60_Gene_Expression_Full_Data.txt'
ccle_nci60 = candle.get_file(file_name, data_url+file_name, cache_subdir='examples')
# download all the gene_set files needed
data_url = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/uno/Candle_Milestone_16_Version_12_15_2019/Data/Gene_Sets/MSigDB.v7.0/'
for gene_set_category in ['c2.cgp','c2.cp.biocarta','c2.cp.kegg','c2.cp.pid','c2.cp.reactome','c5.bp','c5.cc','c5.mf','c6.all']:
for gene_name_type in ['entrez', 'symbols']:
file_name = gene_set_category+'.v7.0.'+gene_name_type+'.gmt'
local_file = candle.get_file(file_name, data_url+file_name, cache_subdir='examples/Gene_Sets/MSigDB.v7.0')
# extract base directory for gene_set data files
data_dir = local_file.split(file_name)[0]
print('Gene Set data is locally stored at ', data_dir)
# Select features based on missing values
print('\n')
print('Testing select_features_by_missing_values')
print('Drug descriptor dataframe includes 10 drugs (rows) and 10 drug descriptor features (columns)')
data = pd.read_csv(drug_descriptor, sep='\t', engine='c', na_values=['na', '-', ''], header=0, index_col=0, low_memory=False)
print(data)
print('Select features with missing rates smaller than 0.1')
id = candle.select_features_by_missing_values(data, threshold=0.1)
print('Feature IDs', id)
print('Select features with missing rates smaller than 0.3')
id = candle.select_features_by_missing_values(data.values, threshold=0.3)
print('Feature IDs', id)
# Select features based on variation
print('\n')
print('Testing select_features_by_variation')
print('Select features with a variance larger than 100')
id = candle.select_features_by_variation(data, variation_measure='var', threshold=100, portion=None, draw_histogram=False)
print('Feature IDs', id)
print('Select the top 2 features with the largest standard deviation')
id = candle.select_features_by_variation(data, variation_measure='std', portion=0.2)
print('Feature IDs', id)
# Select decorrelated features
print('\n')
print('Testing select_decorrelated_features')
print('Select features that are not identical to each other and are not all missing.')
id = candle.select_decorrelated_features(data, threshold=None, random_seed=None)
print('Feature IDs', id)
print('Select features whose absolute mutual Spearman correlation coefficient is smaller than 0.8')
id = candle.select_decorrelated_features(data, method='spearman', threshold=0.8, random_seed=10)
print('Feature IDs', id)
# Generate cross-validation partitions of data
print('\n')
print('Testing generate_cross_validation_partition')
print('Generate 5-fold cross-validation partition of 10 samples twice')
p = candle.generate_cross_validation_partition(range(10), n_folds=5, n_repeats=2, portions=None, random_seed=None)
print(p)
print('Drug response data of 5 cell lines treated by various drugs.')
data = pd.read_csv(response_data, sep='\t', engine='c', na_values=['na', '-', ''], header=0, index_col=None, low_memory=False)
import pandas as pd
import sys
from sklearn.naive_bayes import MultinomialNB
from scipy.stats import f_oneway
from mlxtend.evaluate import cochrans_q
from mlxtend.evaluate import mcnemar
from mlxtend.evaluate import mcnemar_table
import classifier_tool as tool
if __name__ == "__main__":
f_in = sys.argv[1]
df = pd.read_json(f_in)
print("Selection of representation - text")
print("Text selection: title only vs content only vs title and content")
print("Algorithm: Multinomial NB")
print("Feature: BOW - Unigram")
clf = MultinomialNB()
y = df['label'].to_numpy()
df_result = pd.DataFrame()
X, vectorizer = tool.construct_bow_unigrams(df['tc_lower'])
report, dict_result = tool.eval_cv(5, X, y, clf)
sr_tc_lower = pd.Series(dict_result).sort_index()
df_result['tc_lower_P'] = pd.Series([ t['Y']['precision'] for t in report ])
df_result['tc_lower_R'] = pd.Series([ t['Y']['recall'] for t in report ])
df_result['tc_lower_F1'] = pd.Series([ t['Y']['f1-score'] for t in report ])
df_result['tc_lower_acc'] = pd.Series([ t['accuracy'] for t in report ])
    # this part is temporary, for classification repair only
#print(sr_tc_lower)
#f_tc_lower_result = f_in.replace('.json', '_tc_lower_result.xlsx')
#sr_tc_lower.to_excel(f_tc_lower_result)
X, vectorizer = tool.construct_bow_unigrams(df['t_lower'])
report, dict_result = tool.eval_cv(5, X, y, clf)
    sr_t_lower = pd.Series(dict_result)
import datetime
from time import sleep
import pandas as pd
from loguru import logger
import ofanalysis.const as const
import ofanalysis.utility as ut
import tushare as ts
class TSDataUpdate:
def __init__(self, ts_pro_token:str):
self.__pro = ts.pro_api(ts_pro_token)
self.__today = datetime.date.today()
def retrieve_all(self):
self.retrieve_stock_basic()
self.retrieve_stock_daily_basic()
self.retrieve_stock_daily()
self.retrieve_fund_basic()
self.retrieve_fund_nav()
self.retrieve_fund_share()
self.retrieve_fund_manager()
self.retrieve_fund_portfolio()
def retrieve_stock_basic(self):
        logger.info('Full refresh of stock basic info (stock_basic)')
        # read data page by page
df_stock_basic = pd.DataFrame()
i = 0
        while True:  # read data page by page
df_batch_result = self.__pro.stock_basic(**{
"ts_code": "",
"name": "",
"exchange": "",
"market": "",
"is_hs": "",
"list_status": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"symbol",
"name",
"area",
"industry",
"market",
"list_date",
"is_hs",
"delist_date",
"list_status",
"curr_type",
"exchange",
"cnspell",
"enname",
"fullname"
])
if len(df_batch_result) == 0:
break
df_stock_basic = pd.concat([df_stock_basic, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
        ut.db_del_dict_from_mongodb(  # not an incremental update, so clear the existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
target_dict=df_stock_basic.to_dict(orient='records')
)
def retrieve_stock_daily_basic(self):
        check_field = 'trade_date'  # field used as the basis for incremental updates
        logger.info('Updating daily stock indicators (stock_daily_basic)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
field=check_field
)
        if len(existed_records) == 0:  # empty collection
trade_cal_start_date = '20000101'
else:
            existed_records.sort(reverse=True)  # sort in descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
            logger.info('Updating stock_daily_basic: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
            while True:  # read data page by page
                for _ in range(const.RETRY_TIMES):  # retry mechanism
try:
df_batch_daily = self.__pro.daily_basic(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"trade_date",
"close",
"turnover_rate",
"turnover_rate_f",
"volume_ratio",
"pe",
"pe_ttm",
"pb",
"ps",
"ps_ttm",
"dv_ratio",
"dv_ttm",
"total_share",
"float_share",
"free_share",
"total_mv",
"circ_mv"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
                logger.info('Date: %s, stock_daily_basic returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_stock_daily(self):
        check_field = 'trade_date'  # field used as the basis for incremental updates
        logger.info('Updating daily stock quotes (stock_daily)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
field=check_field
)
        if len(existed_records) == 0:  # empty collection
trade_cal_start_date = '20000101'
else:
            existed_records.sort(reverse=True)  # sort in descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
            logger.info('Updating stock_daily: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
            while True:  # read data page by page
                for _ in range(const.RETRY_TIMES):  # retry mechanism
try:
df_batch_daily = self.__pro.daily(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"offset": i,
"limit": const.EACH_TIME_ITEM
}, fields=[
"ts_code",
"trade_date",
"open",
"high",
"low",
"close",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
                logger.info('Date: %s, stock_daily returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_fund_basic(self):
        logger.info('Full refresh of fund basic info (fund_basic)')
df_all_fund = pd.DataFrame()
i = 0
        while True:  # read data page by page
df_batch_result = self.__pro.fund_basic(**{
"ts_code": "",
"market": "",
"update_flag": "",
"offset": i,
"limit": const.EACH_TIME_ITEM,
"status": ""
}, fields=[
"ts_code",
"name",
"management",
"custodian",
"fund_type",
"found_date",
"due_date",
"list_date",
"issue_date",
"delist_date",
"issue_amount",
"m_fee",
"c_fee",
"duration_year",
"p_value",
"min_amount",
"exp_return",
"benchmark",
"status",
"invest_type",
"type",
"trustee",
"purc_startdate",
"redm_startdate",
"market"
])
if len(df_batch_result) == 0:
break
df_all_fund = pd.concat([df_all_fund, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
sleep(8)
        ut.db_del_dict_from_mongodb(  # not an incremental update, so clear the existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_BASIC,
target_dict=df_all_fund.to_dict(orient='records')
)
def retrieve_fund_nav(self):
        check_field = 'nav_date'  # field used as the basis for incremental updates
        logger.info('Updating fund NAV data (fund_nav)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_NAV,
field=check_field
)
        if len(existed_records) == 0:  # empty collection
trade_cal_start_date = '20000101'
else:
            existed_records.sort(reverse=True)  # sort in descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
            logger.info('Updating fund_nav: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
            while True:  # read data page by page
                for _ in range(const.RETRY_TIMES):  # retry mechanism
try:
df_batch_daily = self.__pro.fund_nav(**{
"ts_code": "",
"nav_date": date,
"offset": i,
"limit": const.EACH_TIME_ITEM,
"market": "",
"start_date": "",
"end_date": ""
}, fields=[
"ts_code",
"ann_date",
"nav_date",
"unit_nav",
"accum_nav",
"accum_div",
"net_asset",
"total_netasset",
"adj_nav",
"update_flag"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
                df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
# In addition to all the data inside the folder ./data/raw/
# this script also requires the file moodAndOtherVariables.csv to be placed inside the folder ./data/external/myaiguideconfidentialdata/Participant1/
import csv
import datetime
import os
import os.path
import pickle
import re
import numpy as np
import pandas as pd
import sys
sys.path.insert(1, '../src/MyAIGuide/data')
from storeBasisPeakInDataFrame import storeBasisPeakInDataFrame
from fitbitDataGatheredFromWebExport import fitbitDataGatheredFromWebExport
from movesDataGatheredFromWebExport import movesDataGatheredFromWebExport
from googleFitGatheredFromWebExport import googleFitGatheredFromWebExport
from google_fit import get_google_fit_activities
from storePainIntensitiesForParticipant1 import storePainIntensitiesForParticipant1
from storeWhatPulse import storeWhatPulse
from storeManicTime import storeManicTime
from storeManicTimeBlankScreen import storeManicTimeBlankScreen
from storeEyeRelatedActivitiesParticipant1 import storeEyeRelatedActivitiesParticipant1
from storeSportDataParticipant1 import storeSportDataParticipant1
from retrieve_mentalstate_participant1 import retrieve_mentalstate_participant1
from calculateCumulatedElevationGainMoves import retrieve_stored_CEG_moves
# Creation of the dataframe where everything will be stored
i = pd.date_range("2015-11-19", periods=2075, freq="1D")
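# The daily index above spans the tracking period; presumably it seeds the master
# dataframe that the store* functions imported earlier fill in, along the lines of
# (column names here are placeholders, not from the original script):
#     data = pd.DataFrame(np.nan, index=i, columns=["steps", "pain"])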
"""Function that returns data from field AWS
"""
# External modules
import sys, os, glob, json
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.backends.backend_pdf import PdfPages
from pandas.plotting import register_matplotlib_converters
import math
import time
from pathlib import Path
from tqdm import tqdm
import logging
import coloredlogs
# Locals
dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(dirname)
from src.utils.settings import config
def get_field(loc="schwarzsee19"):
with open("data/common/constants.json") as f:
CONSTANTS = json.load(f)
SITE, FOLDER = config(loc)
if loc == "guttannen22":
cols_old = [
"TIMESTAMP",
"T_probe_Avg",
"RH_probe_Avg",
"amb_press_Avg",
"WS",
"SnowHeight",
"SW_IN",
"SW_OUT",
"LW_IN",
"LW_OUT",
"H",
"Tice_Avg(1)",
"Tice_Avg(2)",
"Tice_Avg(3)",
"Tice_Avg(4)",
"Tice_Avg(5)",
"Tice_Avg(6)",
"Tice_Avg(7)",
"Tice_Avg(8)",
]
cols_new = ["time", "temp", "RH", "press", "wind", "snow_h", "SW_global", "SW_out", "LW_in", "LW_out",
"Qs_meas", "T_ice_1", "T_ice_2", "T_ice_3", "T_ice_4", "T_ice_5","T_ice_6","T_ice_7","T_ice_8"]
cols_dict = dict(zip(cols_old, cols_new))
path = FOLDER["raw"] + "CardConvert/"
all_files = glob.glob(path + "*.dat")
li = []
for file in all_files:
df = pd.read_csv(
file,
sep=",",
skiprows=[0,2,3],
parse_dates=["TIMESTAMP"],
)
df = df[cols_old]
df = df.rename(columns=cols_dict)
for col in df.columns:
if col != 'time':
df[col] = df[col].astype(float)
df = df.round(2)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
df = df.set_index("time").sort_index()
df = df[SITE["start_date"] :]
df = df.reset_index()
"""Correct data errors"""
df= df.replace("NAN", np.NaN)
df = df.set_index("time").resample("H").mean().reset_index()
df["missing_type"] = "-"
df.loc[df.wind > 50, "wind"] = np.NaN
df.loc[df.Qs_meas > 300, "Qs_meas"] = np.NaN
df.loc[df.Qs_meas < -300, "Qs_meas"] = np.NaN
df.loc[:, "Qs_meas"] = df["Qs_meas"].interpolate()
df["alb"] = df["SW_out"]/df["SW_global"]
df.loc[df.alb > 1, "alb"] = np.NaN
df.loc[df.alb < 0, "alb"] = np.NaN
df.loc[:, "alb"] = df["alb"].interpolate()
df['ppt'] = df.snow_h.diff()*10*CONSTANTS['RHO_S']/CONSTANTS['RHO_W'] # mm of snowfall w.e. in one hour
df.loc[df.ppt<1, "ppt"] = 0 # Assuming 1 mm error
print(df['ppt'].describe())
# print(df.time[df.T_ice_8.isna()].values[0])
df['T_bulk_meas'] = (df["T_ice_2"] + df["T_ice_3"] + df["T_ice_4"]+ df["T_ice_5"]+ df["T_ice_6"]+df["T_ice_7"])/6
# df['T_bulk_meas'] = (df["T_ice_2"] + df["T_ice_3"] + df["T_ice_4"]+ df["T_ice_5"]+ df["T_ice_6"])/5
df['T_G'] = df["T_ice_1"]
cols = [
"time",
"temp",
"RH",
"wind",
"SW_global",
"alb",
"press",
"missing_type",
"LW_in",
"Qs_meas",
# "ppt",
"snow_h",
"T_bulk_meas",
"T_G",
]
df_out = df[cols]
if df_out.isna().values.any():
print(df_out.isna().sum())
df_out.to_csv(FOLDER["input"] + "field.csv", index=False)
fig, ax = plt.subplots()
x = df.time
# ax.plot(x,df["T_ice_7"])
# ax.plot(x,df["T_ice_6"])
# ax.plot(x,df["T_ice_8"] - df["T_ice_7"])
# ax.plot(x,df["T_ice_7"] - df["T_ice_6"])
# ax.plot(x,df["T_ice_6"] - df["T_ice_5"])
ax.plot(x,df["T_ice_5"] - df["T_ice_4"])
ax.plot(x,df["T_ice_4"] - df["T_ice_3"])
ax.plot(x,df["T_ice_3"] - df["T_ice_2"])
# ax.plot(x,df["T_ice_3"])
ax.set_ylim([-3,0.1])
ax.legend()
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%b %d"))
ax.xaxis.set_minor_locator(mdates.DayLocator())
fig.autofmt_xdate()
plt.savefig(
FOLDER['fig'] + "temps.png",
bbox_inches="tight",
dpi=300,
)
plt.clf()
return df_out
if loc == "gangles21":
col_list = [
"TIMESTAMP",
"AirTC_Avg",
"RH",
"WS",
]
cols = ["temp", "RH", "wind"]
df_in = pd.read_csv(
FOLDER["raw"] + "/Gangles_Table15Min.dat",
sep=",",
skiprows=[0, 2, 3, 4],
parse_dates=["TIMESTAMP"],
)
df_in = df_in[col_list]
df_in.rename(
columns={
"TIMESTAMP": "time",
"AirTC_Avg": "temp",
"RH_probe_Avg": "RH",
"WS": "wind",
},
inplace=True,
)
df_in1 = pd.read_csv(
FOLDER["raw"] + "/Gangles_Table60Min.dat",
sep=",",
skiprows=[0, 2, 3],
parse_dates=["TIMESTAMP"],
)
df_in1.rename(
columns={
"TIMESTAMP": "time",
"BP_mbar": "press", # mbar same as hPa
},
inplace=True,
)
for col in df_in1:
if col != "time":
df_in1[col] = pd.to_numeric(df_in1[col], errors="coerce")
df_in = df_in.set_index("time")
df_in1 = df_in1.set_index("time")
df_in1 = df_in1.reindex(
pd.date_range(df_in1.index[0], df_in1.index[-1], freq="15Min"),
fill_value=np.NaN,
)
df_in = df_in.replace("NAN", np.NaN)
df_in1 = df_in1.replace("NAN", np.NaN)
df_in1 = df_in1.resample("15Min").interpolate("linear")
df_in.loc[:, "press"] = df_in1["press"]
df_in = df_in.replace("NAN", np.NaN)
if df_in.isnull().values.any():
print("Warning: Null values present")
print(df_in[cols].isnull().sum())
df_in = df_in.round(3)
df_in = df_in.reset_index()
df_in.rename(columns={"index": "time"},inplace=True,)
start_date = datetime(2020, 12, 14)
df_in = df_in.set_index("time")
df_in = df_in[start_date:]
df1 = pd.read_csv(
FOLDER["raw"] + "/HIAL_input_field.csv",
sep=",",
parse_dates=["When"],
)
df1 = df1.rename(columns={"When": "time"})
df = df_in
df1 = df1.set_index("time")
cols = ["SW_global"]
for col in cols:
df.loc[:, col] = df1[col]
df = df.reset_index()
df = df[df.columns.drop(list(df.filter(regex="Unnamed")))]
df = df.dropna()
# df.to_csv("outputs/" + loc + "_input_field.csv")
mask = df["SW_global"] < 0
mask_index = df[mask].index
df.loc[mask_index, "SW_global"] = 0
# diffuse_fraction = 0
# df["SW_diffuse"] = diffuse_fraction * df.SW_global
# df["SW_direct"] = (1-diffuse_fraction)* df.SW_global
df = df.set_index("time").resample("H").mean().reset_index()
df["ppt"] = 0
df["missing_type"] = "-"
# df["cld"] = 0
df.to_csv(FOLDER["input"] + "field.csv")
return df
if loc == "guttannen20":
df_in = pd.read_csv(
FOLDER["raw"] + "field.txt",
header=None,
encoding="latin-1",
skiprows=7,
sep="\\s+",
index_col=False,
names=[
"Date",
"Time",
"Discharge",
"Wind Direction",
"Wind Speed",
"Maximum Wind Speed",
"Temperature",
"Humidity",
"Pressure",
"Pluviometer",
],
)
types_dict = {
"Date": str,
"Time": str,
"Discharge": float,
"Wind Direction": float,
"Wind Speed": float,
"Temperature": float,
"Humidity": float,
"Pressure": float,
"Pluviometer": float,
}
for col, col_type in types_dict.items():
df_in[col] = df_in[col].astype(col_type)
df_in["time"] = pd.to_datetime(df_in["Date"] + " " + df_in["Time"])
df_in["time"] = pd.to_datetime(df_in["time"], format="%Y.%m.%d %H:%M:%S")
df_in = df_in.drop(["Pluviometer", "Date", "Time"], axis=1)
df_in = df_in.set_index("time").resample("H").mean().reset_index()
mask = (df_in["time"] >= SITE["start_date"]) & (
df_in["time"] <= SITE["end_date"]
)
df_in = df_in.loc[mask]
df_in = df_in.reset_index()
days = pd.date_range(start=SITE["start_date"], end=SITE["end_date"], freq="H")
days = pd.DataFrame({"time": days})
df = pd.merge(
df_in[
[
"time",
"Discharge",
"Wind Speed",
"Temperature",
"Humidity",
"Pressure",
]
],
days,
on="time",
)
df = df.round(3)
# CSV output
df.rename(
columns={
"Wind Speed": "wind",
"Temperature": "temp",
"Humidity": "RH",
"Pressure": "press",
},
inplace=True,
)
logger.info(df_in.head())
logger.info(df_in.tail())
df.to_csv(FOLDER["input"] + "field.csv")
if loc == "guttannen21":
df_in = pd.read_csv(
FOLDER["raw"] + "field.txt",
header=None,
encoding="latin-1",
skiprows=7,
sep="\\s+",
names=[
"Date",
"Time",
"Wind Direction",
"Wind Speed",
"Maximum Wind Speed",
"Temperature",
"Humidity",
"Pressure",
"Pluviometer",
],
)
types_dict = {
"Date": str,
"Time": str,
"Wind Direction": float,
"Wind Speed": float,
"Temperature": float,
"Humidity": float,
"Pressure": float,
"Pluviometer": float,
}
for col, col_type in types_dict.items():
df_in[col] = df_in[col].astype(col_type)
df_in["time"] = pd.to_datetime(df_in["Date"] + " " + df_in["Time"])
df_in["time"] = pd.to_datetime(df_in["time"], format="%Y.%m.%d %H:%M:%S")
df_in = df_in.drop(["Pluviometer", "Date", "Time"], axis=1)
logger.debug(df_in.head())
logger.debug(df_in.tail())
df_in = df_in.set_index("time").resample("H").mean().reset_index()
mask = (df_in["time"] >= SITE["start_date"]) & (
df_in["time"] <= SITE["end_date"]
)
df_in = df_in.loc[mask]
df_in = df_in.reset_index()
days = pd.date_range(start=SITE["start_date"], end=SITE["end_date"], freq="H")
days = pd.DataFrame({"time": days})
df = pd.merge(
df_in[
[
"time",
"Wind Speed",
"Temperature",
"Humidity",
"Pressure",
]
],
days,
on="time",
)
df = df.round(3)
# CSV output
df.rename(
columns={
"Wind Speed": "wind",
"Temperature": "temp",
"Humidity": "RH",
"Pressure": "press",
},
inplace=True,
)
df.to_csv(FOLDER["input"] + "field.csv")
if loc == "schwarzsee19":
df_in = pd.read_csv(
FOLDER["raw"] + SITE["name"][:-2] + "_aws.txt",
header=None,
encoding="latin-1",
skiprows=7,
sep="\\s+",
names=[
"Date",
"Time",
"Discharge",
"Wind Direction",
"Wind Speed",
"Maximum Wind Speed",
"Temperature",
"Humidity",
"Pressure",
"Pluviometer",
],
)
df_in = df_in.drop(["Pluviometer"], axis=1)
df_in["time"] = pd.to_datetime(df_in["Date"] + " " + df_in["Time"])
df_in["time"] = pd.to_datetime(df_in["time"], format="%Y.%m.%d %H:%M:%S")
# Correct datetime errors
for i in tqdm(range(1, df_in.shape[0])):
if str(df_in.loc[i, "time"].year) != "2019":
df_in.loc[i, "time"] = df_in.loc[i - 1, "time"] + pd.Timedelta(
minutes=5
)
df_in = df_in.set_index("time").resample("H").last().reset_index()
mask = (df_in["time"] >= SITE["start_date"]) & (
df_in["time"] <= SITE["end_date"]
)
df_in = df_in.loc[mask]
df_in = df_in.reset_index()
days = pd.date_range(start=SITE["start_date"], end=SITE["end_date"], freq="H")
days = pd.DataFrame({"time": days})
df = pd.merge(
days,
df_in[
[
"time",
"Discharge",
"Wind Speed",
"Maximum Wind Speed",
"Wind Direction",
"Temperature",
"Humidity",
"Pressure",
]
],
on="time",
)
# Include Spray time
df_nights = pd.read_csv(
FOLDER["raw"] + "schwarzsee_fountain_time.txt",
sep="\\s+",
)
df_nights["Start"] = pd.to_datetime(
df_nights["Date"] + " " + df_nights["start"]
)
df_nights["End"] = pd.to_datetime(df_nights["Date"] + " " + df_nights["end"])
df_nights["Start"] = pd.to_datetime(
df_nights["Start"], format="%Y-%m-%d %H:%M:%S"
)
df_nights["End"] = | pd.to_datetime(df_nights["End"], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
#!/usr/bin/python
# Internal functions for renewing the database of stellar atmosphere models and the line list.
# WARNING: the dependencies in this module may not be completely satisfied, and some functions may only run on Mingjie's computer.
import numpy as np
import pandas as pd
import os
from pymoog import model
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
import line_data
import mendeleev
import re
MOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME'])
MOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])
MOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])
element2index_dict = {'TiO':[22,8], 'CH':[6,1], 'OH':[8,1], 'MgH':[12,1], 'SiH':[14,1], 'C2':[6,6], 'CN':[6,7], 'CO':[6,8]}
if os.environ.get('READTHEDOCS') != 'True':
atoms = pd.read_csv(MOOG_file_path + '/atoms.csv')
atoms_dict = dict(zip(atoms['symbol'], atoms['mass_number']))
diss_energy = pd.read_csv(MOOG_file_path + '/dissociation_energy_list.csv')
def get_isotope_list(string):
'''
    Get the isotope list of an element from the last column of a VALD line list.
Example: (48)TiO -> [48, 16]
Parameters
----------
string : str
The string in the format of "(\d*)[A-Z][a-z]*". This is the last part in VALD linelist.
'''
a = re.findall(r'\(\d*\)|[A-Z][a-z]*', string)
isotope_list = []
i = 0
while i < len(a):
if a[i][0] == '(':
isotope_list.append(int(a[i].strip('()')))
            i += 2  # skip the element token that this isotope number qualifies
else:
isotope_list.append(atoms_dict[a[i]])
i += 1
return isotope_list
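# Example trace (values taken from the docstring example; atomic masses come from
# the bundled atoms.csv):
#     >>> re.findall(r'\(\d*\)|[A-Z][a-z]*', '(48)TiO')
#     ['(48)', 'Ti', 'O']
#     >>> get_isotope_list('(48)TiO')
#     [48, 16]
# '(48)' supplies the Ti isotope directly, so 'Ti' is skipped and only 'O' is
# looked up in atoms_dict.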
def element2index(string_all):
'''
Convert element string to index in VALD format.
Example: TiO 1, ... (48)TiO -> 822.01648; Fe 1, ... Fe -> 26.0.
Parameters
----------
string_all : str
The string in containing element index in VALD linelist. Combination of the first and last column.
'''
string, isotope_string = string_all.split(',')
isotope_string = isotope_string[-12:]
element_string, ion_stage = string.split(' ')
if element_string in element2index_dict.keys():
element_indices = element2index_dict[element_string]
else:
p = re.compile(r"[A-Z][a-z]*")
p_num = re.compile(r"\d")
ele_loca = []
ele_name = []
num_loca = []
num = []
for m in p.finditer(element_string):
ele_loca.append(m.start())
ele_name.append(m.group())
for m in p_num.finditer(element_string):
num_loca.append(m.start())
num.append(m.group())
element_string_list = []
for i in range(len(ele_name)):
if ele_loca[i]+1 in num_loca:
add_list = [ele_name[i]] * int(num[num_loca.index(ele_loca[i]+1)])
else:
add_list = [ele_name[i]]
element_string_list = element_string_list + add_list
ion_stage = int(ion_stage) - 1
element_indices = []
for ele in element_string_list:
element_indices.append(mendeleev.element(ele).atomic_number)
if len(element_indices) == 1:
return '{}.{}'.format(element_indices[0], ion_stage*10000)
else:
isotope_list = get_isotope_list(isotope_string)
# isotope_list = [x for _,x in sorted(zip(element_indices,isotope_list))]
element_indices.sort()
isotope_list.sort()
element_indices_string = '{:2.0f}' + '{:02.0f}'*(len(element_indices)-1) + '.0' + '{:02.0f}'*len(isotope_list)
element_indices_num = float(element_indices_string.format(*element_indices, *isotope_list))
return element_indices_num
# return element_indices_string.format(*element_indices, *isotope_list)
def get_diss_energy(ele_index):
'''
    Get the dissociation energy of a molecular particle from ele_index.
    Source: https://labs.chem.ucsb.edu/zakarian/armen/11---bonddissociationenergy.pdf
    Only supports those in the VALD linelist.
Parameters
----------
ele_index : str or float
The element index in MOOG format.
'''
diss_energy['diss_energy(eV)'] = diss_energy['dissociation_energy (kJ/mol)'] / 96.485
diss_energy_pd = diss_energy
ele_index = np.floor(float(ele_index))
try:
diss_energy_value = diss_energy_pd.loc[diss_energy_pd['element_index'] == ele_index, 'diss_energy(eV)'].values[0]
return diss_energy_value
except:
return np.nan
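# The conversion above uses 1 eV per particle ~ 96.485 kJ/mol, so, for example, a
# tabulated bond energy of roughly 1072 kJ/mol (about that of CO) maps to
# 1072 / 96.485 ~ 11.1 eV.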
def value2pm(value):
'''
Transform the metallicity value to Kurucz format.
Example: -1.0 -> m10
Parameters
----------
value : float
The value of metallicity.
'''
if value < 0:
return 'm{:02.0f}'.format(np.abs(value)*10)
else:
return 'p{:02.0f}'.format(np.abs(value)*10)
def split_kurucz_model():
'''
    Split the combined Kurucz model files into single-model files. Internal function.
'''
grid_kurucz = pd.read_csv('files/grid_points_kurucz.csv')
for m_h in grid_kurucz.groupby('m_h').size().index:
file = open('files/model/kurucz/standard/a{}k2.dat'.format(value2pm(m_h)))
content = file.readlines()
is_first = True
for line in content:
if 'EFF ' in line:
if not(is_first):
with open('files/model/kurucz/standard/single/teff{:.0f}logg{:.1f}m_h{:+.1f}.dat'.format(teff, logg, m_h), 'w') as w_file:
w_file.writelines(model_line)
teff = float(line[5:13])
logg = float(line[21:29])
model_line = [line]
is_first = False
else:
model_line.append(line)
with open('files/model/kurucz/standard/single/teff{:.0f}logg{:.1f}m_h{:+.1f}.dat'.format(teff, logg, m_h), 'w') as w_file:
w_file.writelines(model_line)
def search_grid_point_kurucz():
'''
    Search all the grid points of the Kurucz models and save the list to grid_path.
    The search is limited to standard models with microturbulence = 2.
    Internal use.
'''
teff_range = np.arange(3500, 50001, 250)
logg_range = np.arange(0, 5.1, 0.5)
m_h_range = np.concatenate([np.arange(-5, -0.4, 0.5), np.arange(-0.3, 0, 0.1), [0], np.arange(0.1, 0.35, 0.1) ,[0.5, 1]])
grid_point_kurucz = []
for m_h in m_h_range:
for teff in teff_range:
for logg in logg_range:
if os.path.isfile('files/model/kurucz/standard/single/teff{:.0f}logg{:.1f}m_h{:+.1f}.dat'.format(teff, logg, m_h)):
_, b, _ = model.read_Kurucz_model('files/model/kurucz/standard/single/teff{:.0f}logg{:.1f}m_h{:+.1f}.dat'.format(teff, logg, m_h))
length = b.shape[0]
column = b.shape[1]
if len(grid_point_kurucz) == 0:
grid_point_kurucz = np.array([[teff, logg, m_h, length, column]])
else:
grid_point_kurucz = np.concatenate([grid_point_kurucz, np.array([[teff, logg, m_h, length, column]])])
grid_kurucz = pd.DataFrame(grid_point_kurucz, columns=['Teff', 'logg', 'm_h', 'length', 'column'])
return grid_kurucz
def plot_model_grid():
'''
Plot the grid of models in each metallicity.
Internal use.
'''
grid_kurucz = pd.read_csv('files/grid_points_kurucz.csv')
for m_h in grid_kurucz.groupby('m_h').size().index:
plt.figure(figsize=(13,4))
index = grid_kurucz['m_h'] == m_h
grid_matrix = np.array(grid_kurucz.loc[index, ['Teff', 'logg']])
tri = Delaunay(grid_matrix)
for i in range(len(tri.simplices)-1, -1, -1):
if min(grid_matrix[tri.simplices[i]][:,0]) >= 35000:
teff_gap = 5000
else:
teff_gap = 1500
if np.ptp(grid_matrix[tri.simplices[i]][:,0]) >= teff_gap or np.ptp(grid_matrix[tri.simplices[i]][:,1]) > 0.5:
tri.simplices = np.concatenate([tri.simplices[:i], tri.simplices[i+1:]])
plt.triplot(grid_matrix[:,0], grid_matrix[:,1], tri.simplices, zorder=0, lw=1, color='gray',alpha=0.5)
if m_h < 0.5:
plt.plot([50000, 42500], [5, 5], color='gray', zorder=0, alpha=0.5, lw=1)
elif m_h == 0.5:
plt.plot([45000, 40000], [5, 5], color='gray', zorder=0, alpha=0.5, lw=1)
elif m_h == 1:
plt.plot([40000, 37500], [5, 5], color='gray', zorder=0, alpha=0.5, lw=1)
plt.scatter(grid_kurucz.loc[index & (grid_kurucz['length']==72), 'Teff'], grid_kurucz.loc[index & (grid_kurucz['length']==72), 'logg'], s=5, label='Model length: 72')
plt.scatter(grid_kurucz.loc[index & (grid_kurucz['length']==64), 'Teff'], grid_kurucz.loc[index & (grid_kurucz['length']==64), 'logg'], s=5, c='C3', label='Model length: 64')
plt.legend()
plt.xlim((1175, 52325))
plt.title('[Fe/H] = {:.1f}'.format(m_h))
plt.xlabel(r'$T_\mathrm{{eff}}$'); plt.ylabel('logg')
plt.gca().invert_xaxis(); plt.gca().invert_yaxis()
plt.tight_layout()
plt.savefig('../docs/img/grid_points_kurucz/m_h{:+.1f}.png'.format(m_h), dpi=250)
plt.close()
def combine_linelist():
for ele in ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Fe']:
if ele == 'H':
vald = line_data.read_linelist('files/linelist/vald/vald_H')
else:
vald = pd.concat([vald, line_data.read_linelist('files/linelist/vald/vald_{}'.format(ele))])
vald.sort_values('wavelength', inplace=True)
vald.reset_index(drop=True, inplace=True)
line_data.save_linelist(vald, 'files/linelist/vald/vald_3800_7400.list')
def vald2moog_format(init_linelist_name, out_linelist_name, head=None, loggf_cut=None):
'''
Transform VALD linelist into MOOG format.
Parameters
----------
init_linelist_name : str
The VALD format line list.
out_linelist_name : str
Output line list name
head : int, optional
If specified then only save the first `head` number of lines.
loggf_cut : float, optional
Cut on loggf (only save for the lines with loggf > loggf_cut)
'''
    # Find the footer index of the VALD line list
with open(init_linelist_name) as file:
contents = file.readlines()
try:
footer_index = len(contents) - contents.index('* oscillator strengths were scaled by the solar isotopic ratios.\n')
except ValueError:
footer_index = 0
    # Delete all the single quote characters.
file = open(init_linelist_name)
file_content = file.readlines()
for i in range(len(file_content)):
file_content[i] = file_content[i].replace("'", '')
file.close()
file = open(init_linelist_name, 'w')
file.writelines(file_content)
file.close()
# subprocess.run(['sed', "s/'//g", init_linelist_name, '>', 'temp'])
# subprocess.run(['mv', "temp", init_linelist_name])
vald_init = pd.read_csv(init_linelist_name,skiprows=2, skipfooter=footer_index, usecols=range(9), engine = 'python', names=['element', 'wavelength', 'EP', 'loggf', 'rad_damp', 'Stark_damp', 'Walls_damp', 'Lande_factor', 'Comment'])
if head != None:
vald_init = vald_init[:head]
if loggf_cut != None:
vald_init = vald_init[vald_init['loggf'] >= loggf_cut]
vald_init['element_all'] = vald_init[['element', 'Comment']].apply(lambda x: ', '.join(x), axis=1)
vald_init['element_index'] = vald_init['element_all'].map(element2index)
vald_init['diss_energy'] = vald_init['element_index'].map(get_diss_energy)
vald_out = vald_init[['wavelength', 'element_index', 'EP', 'loggf', 'Walls_damp', 'diss_energy']]
vald_out.columns = ['wavelength', 'element_index', 'EP', 'loggf', 'C6', 'diss_energy']
vald_out = vald_out.astype(np.float64)
# Remove triple or higher ionized lines; MOOG cannot do this.
vald_out = vald_out[np.around(np.mod(vald_out['element_index'],1), decimals=1) < 0.3]
line_data.save_linelist(vald_out, out_linelist_name)
def main():
init_linelist_name = sys.argv[1]
out_linelist_name = sys.argv[2]
vald2moog_format(init_linelist_name, out_linelist_name)
if __name__ == "__main__":
main()
def ges2moog(ges_path, save_path):
    GES = pd.read_csv(ges_path, sep='\t')
"""
Code for transforming EIA data that pertains to more than one EIA Form.
This module helps normalize EIA datasets and infers additional connections
between EIA entities (i.e. utilities, plants, units, generators...). This
includes:
- compiling a master list of plant, utility, boiler, and generator IDs that
appear in any of the EIA 860 or 923 tables.
- inferring more complete boiler-generator associations.
- differentiating between static and time varying attributes associated with
the EIA entities, storing the static fields with the entity table, and the
variable fields in an annual table.
The boiler generator association inference (bga) takes the associations
provided by the EIA 860, and expands on it using several methods which can be
found in :func:`pudl.transform.eia._boiler_generator_assn`.
"""
import importlib.resources
import logging
import networkx as nx
import numpy as np
import pandas as pd
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
def _occurrence_consistency(entity_id, compiled_df, col,
cols_to_consit, strictness=.7):
"""
    Find the occurrence of plants & the consistency of records.
    We need to determine how consistent a reported value is in the records
    across all of the years or tables that the value is being reported, so we
    want to compile two key numbers: the number of occurrences of the entity and
    the number of occurrences of each reported record for each entity. With that
information we can determine if the reported records are strict enough.
Args:
entity_id (list): a list of the id(s) for the entity. Ex: for a plant
entity, the entity_id is ['plant_id_eia']. For a generator entity,
the entity_id is ['plant_id_eia', 'generator_id'].
compiled_df (pandas.DataFrame): a dataframe with every instance of the
column we are trying to harvest.
col (str): the column name of the column we are trying to harvest.
cols_to_consit (list): a list of the columns to determine consistency.
This either the [entity_id] or the [entity_id, 'report_date'],
depending on whether the entity is static or annual.
strictness (float): How consistent do you want the column records to
be? The default setting is .7 (so 70% of the records need to be
consistent in order to accept harvesting the record).
Returns:
pandas.DataFrame: this dataframe will be a transformed version of
compiled_df with NaNs removed and with new columns with information
about the consistency of the reported values.
"""
    # select only the columns you want and drop the NaNs
    # we want to drop the NaNs because missing values shouldn't count toward consistency
col_df = compiled_df[entity_id + ['report_date', col, 'table']].copy()
if pc.column_dtypes["eia"][col] == pd.StringDtype():
nan_str_mask = (col_df[col] == "nan").fillna(False)
col_df.loc[nan_str_mask, col] = pd.NA
col_df = col_df.dropna()
if len(col_df) == 0:
col_df[f'{col}_consistent'] = pd.NA
col_df[f'{col}_consistent_rate'] = pd.NA
col_df['entity_occurences'] = pd.NA
col_df = col_df.drop(columns=['table'])
return col_df
# determine how many times each entity occurs in col_df
occur = (
col_df
.groupby(by=cols_to_consit, observed=True)
.agg({'table': "count"})
.reset_index()
.rename(columns={'table': 'entity_occurences'})
)
    # add the occurrences into the main dataframe
col_df = col_df.merge(occur, on=cols_to_consit)
# determine how many instances of each of the records in col exist
consist_df = (
col_df
.groupby(by=cols_to_consit + [col], observed=True)
.agg({'table': 'count'})
.reset_index()
.rename(columns={'table': 'record_occurences'})
)
    # now in col_df we have # of times an entity occurred across the tables
    # and we are going to merge in the # of times each value occurred for each
    # entity record. When we merge the consistency in with the occurrences, we
    # can determine if the records are more than 70% consistent across the
    # occurrences of the entities.
col_df = col_df.merge(consist_df, how='outer').drop(columns=['table'])
# change all of the fully consistent records to True
col_df[f'{col}_consistent_rate'] = (
col_df['record_occurences'] / col_df['entity_occurences'])
col_df[f'{col}_consistent'] = (
col_df[f'{col}_consistent_rate'] > strictness)
col_df = col_df.sort_values(f'{col}_consistent_rate')
return col_df
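# A small worked example of the strictness logic above (toy numbers): if a given
# plant_id_eia shows up in 10 harvested records and reports latitude 45.12 in 8 of
# them, then entity_occurences = 10, record_occurences = 8 and
# latitude_consistent_rate = 0.8 > 0.7, so that value is flagged consistent; a
# value seen in only 6 of the 10 records (rate 0.6) is not.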
def _lat_long(dirty_df, clean_df, entity_id_df, entity_id,
col, cols_to_consit, round_to=2):
"""Harvests more complete lat/long in special cases.
    For all of the entities where there is not a consistent enough reported
    record for latitude and longitude, this function reduces the precision of
    the reported lat/long by rounding down the reported records in order to get
    a more complete set of consistent records.
Args:
dirty_df (pandas.DataFrame): a dataframe with entity records that have
inconsistently reported lat/long.
clean_df (pandas.DataFrame): a dataframe with entity records that have
consistently reported lat/long.
entity_id_df (pandas.DataFrame): a dataframe with a complete set of
possible entity ids
entity_id (list): a list of the id(s) for the entity. Ex: for a plant
entity, the entity_id is ['plant_id_eia']. For a generator entity,
the entity_id is ['plant_id_eia', 'generator_id'].
col (string): the column name of the column we are trying to harvest.
cols_to_consit (list): a list of the columns to determine consistency.
This either the [entity_id] or the [entity_id, 'report_date'],
depending on whether the entity is static or annual.
round_to (integer): This is the number of decimals places we want to
preserve while rounding down.
Returns:
pandas.DataFrame: a dataframe with all of the entity ids. some will
have harvested records from the clean_df. some will have harvested
records that were found after rounding. some will have NaNs if no
consistently reported records were found.
"""
# grab the dirty plant records, round and get a new consistency
ll_df = dirty_df.round(decimals={col: round_to})
logger.debug(f"Dirty {col} records: {len(ll_df)}")
ll_df['table'] = 'special_case'
ll_df = _occurrence_consistency(entity_id, ll_df, col, cols_to_consit)
# grab the clean plants
ll_clean_df = clean_df.dropna()
# find the new clean plant records by selecting the True consistent records
ll_df = ll_df[ll_df[f'{col}_consistent']].drop_duplicates(subset=entity_id)
logger.debug(f"Clean {col} records: {len(ll_df)}")
# add the newly cleaned records
ll_clean_df = ll_clean_df.append(ll_df,)
# merge onto the plants df w/ all plant ids
ll_clean_df = entity_id_df.merge(ll_clean_df, how='outer')
return ll_clean_df
def _add_timezone(plants_entity):
"""Adds plant IANA timezones from lat / lon.
Args:
plants_entity (pandas.DataFrame): Plant entity table, including columns
named "latitude", "longitude", and optionally "state"
Returns:
:class:`pandas.DataFrame`: A DataFrame containing the same table, with a
"timezone" column added. Timezone may be missing if lat / lon is
missing or invalid.
"""
plants_entity["timezone"] = plants_entity.apply(
lambda row: pudl.helpers.find_timezone(
lng=row["longitude"], lat=row["latitude"],
state=row["state"], strict=False
),
axis=1,
)
return plants_entity
def _add_additional_epacems_plants(plants_entity):
"""Adds the info for plants that have IDs in the CEMS data but not EIA data.
The columns loaded are plant_id_eia, plant_name, state, latitude, and
longitude. Note that a side effect will be resetting the index on
    plants_entity, if one exists. If that's a problem, modify the code below.
Note that some of these plants disappear from the CEMS before the
earliest EIA data PUDL processes, so if PUDL eventually ingests older
data, these may be redundant.
The set of additional plants is every plant that appears in the hourly CEMS
data (1995-2017) that never appears in the EIA 923 or 860 data (2009-2017
for EIA 923, 2011-2017 for EIA 860).
Args:
plants_entity (pandas.DataFrame) The plant entity table that will be
appended to
Returns:
pandas.DataFrame: The same plants_entity table, with the addition of
some missing EPA CEMS plants.
"""
# Add the plant IDs that are missing and update the values for the others
# The data we're reading is a CSV in pudl/metadata/
# SQL would call this whole process an upsert
# See also: https://github.com/pandas-dev/pandas/issues/22812
cems_df = pd.read_csv(
importlib.resources.open_text(
'pudl.package_data.epa.cems',
'plant_info_for_additional_cems_plants.csv'),
index_col=["plant_id_eia"],
usecols=["plant_id_eia", "plant_name_eia",
"state", "latitude", "longitude"],
)
plants_entity = plants_entity.set_index("plant_id_eia")
cems_unmatched = cems_df.loc[~cems_df.index.isin(plants_entity.index)]
# update will replace columns and index values that add rows or affect
# non-matching columns. It also requires an index, so we set and reset the
# index as necessary. Also, it only works in-place, so we can't chain.
plants_entity.update(cems_df, overwrite=True)
return plants_entity.append(cems_unmatched).reset_index()
def _compile_all_entity_records(entity, eia_transformed_dfs):
"""
Compile all of the entity records from each table they appear in.
Comb through each of the dataframes in the eia_transformed_dfs dictionary
to pull out every instance of the entity id.
"""
# we know these columns must be in the dfs
entity_id = pc.entities[entity][0]
static_cols = pc.entities[entity][1]
annual_cols = pc.entities[entity][2]
base_cols = pc.entities[entity][0] + ['report_date']
# empty list for dfs to be added to for each table below
dfs = []
# for each df in the dict of transformed dfs
for table_name, transformed_df in eia_transformed_dfs.items():
# inside of main() we are going to be adding items into
# eia_transformed_dfs with the name 'annual'. We don't want to harvest
# from our newly harvested tables.
if 'annual' not in table_name:
            # if the df contains the desired columns, then grab those columns
if set(base_cols).issubset(transformed_df.columns):
logger.debug(f" {table_name}...")
# create a copy of the df to muck with
df = transformed_df.copy()
# we know these columns must be in the dfs
cols = []
# check whether the columns are in the specific table
for column in static_cols + annual_cols:
if column in df.columns:
cols.append(column)
df = df[(base_cols + cols)]
df = df.dropna(subset=entity_id)
# add a column with the table name so we know its origin
df['table'] = table_name
dfs.append(df)
# remove the static columns, with an exception
if ((entity in ('generators', 'plants'))
and (table_name in ('ownership_eia860',
'utilities_eia860',
'generators_eia860'))):
cols.remove('utility_id_eia')
transformed_df = transformed_df.drop(columns=cols)
eia_transformed_dfs[table_name] = transformed_df
    # add those records to the compilation
compiled_df = pd.concat(dfs, axis=0, ignore_index=True, sort=True)
# strip the month and day from the date so we can have annual records
compiled_df['report_date'] = compiled_df['report_date'].dt.year
# convert the year back into a date_time object
year = compiled_df['report_date']
compiled_df['report_date'] = pd.to_datetime({'year': year,
'month': 1,
'day': 1})
logger.debug(' Casting harvested IDs to correct data types')
# most columns become objects (ack!), so assign types
compiled_df = compiled_df.astype(pc.entities[entity][3])
return compiled_df
def _manage_strictness(col, eia860_ytd):
"""
Manage the strictness level for each column.
Args:
col (str): name of column
eia860_ytd (boolean): if True, the etl run is attempting to include
year-to-date updated from EIA 860M.
"""
strictness_default = .7
    # the longitude column is very different in the ytd 860M data (it appears
    # to have an additional decimal point); because it shows up in the generator
    # table but is a plant-level data point, it mucks up the consistency
strictness_cols = {
'plant_name_eia': 0,
'utility_name_eia': 0,
'longitude': 0 if eia860_ytd else .7
}
return strictness_cols.get(col, strictness_default)
def harvesting(entity, # noqa: C901
eia_transformed_dfs,
entities_dfs,
eia860_ytd=False,
debug=False):
"""Compiles consistent records for various entities.
For each entity(plants, generators, boilers, utilties), this function
finds all the harvestable columns from any table that they show up
in. It then determines how consistent the records are and keeps the values
that are mostly consistent. It compiles those consistent records into
one normalized table.
There are a few things to note here. First being that we are not expecting
the outcome here to be perfect! We choose to pull the most consistent
record as reported across all the EIA tables and years, but we also
required a "strictness" level of 70% (this is currently a hard coded
argument for _occurrence_consistency). That means at least 70% of the
    records must be the same for us to use that value. So if values for an
    entity haven't been reported 70% consistently, they will show up as
    null values. We built in the ability to add special cases for columns where
    we want to apply a different method to, but the only ones we added were for
    latitude and longitude because they are by far the dirtiest.
We have determined which columns should be considered "static" or "annual".
    These can be found in constants in the `entities` dictionary. Static means
    that it should not change over time. Annual means there is annual
    variability. This distinction was made in part by testing the consistency
and in part by an understanding of how the entities and columns relate in
the real world.
Args:
entity (str): plants, generators, boilers, utilties
eia_transformed_dfs (dict): A dictionary of tbl names (keys) and
transformed dfs (values)
entities_dfs(dict): A dictionary of entity table names (keys) and
entity dfs (values)
eia860_ytd (boolean): if True, the etl run is attempting to include
year-to-date updated from EIA 860M.
debug (bool): If True, this function will also return an additional
dictionary of dataframes that includes the pre-deduplicated
            compiled records with the number of occurrences of the entity and
the record to see consistency of reported values.
Returns:
tuple: A tuple containing:
eia_transformed_dfs (dict): dictionary of tbl names (keys) and
transformed dfs (values)
entity_dfs (dict): dictionary of entity table names (keys) and
entity dfs (values)
Raises:
AssertionError: If the consistency of any record value is <90%.
Todo:
* Return to role of debug.
* Determine what to do with null records
* Determine how to treat mostly static records
"""
# we know these columns must be in the dfs
entity_id = pc.entities[entity][0]
static_cols = pc.entities[entity][1]
annual_cols = pc.entities[entity][2]
logger.debug(" compiling plants for entity tables from:")
compiled_df = _compile_all_entity_records(entity, eia_transformed_dfs)
# compile annual ids
annual_id_df = compiled_df[
['report_date'] + entity_id].copy().drop_duplicates()
annual_id_df.sort_values(['report_date'] + entity_id,
inplace=True, ascending=False)
# create the annual and entity dfs
entity_id_df = annual_id_df.drop(
['report_date'], axis=1).drop_duplicates(subset=entity_id)
entity_df = entity_id_df.copy()
annual_df = annual_id_df.copy()
special_case_cols = {'latitude': [_lat_long, 1],
'longitude': [_lat_long, 1]}
consistency = pd.DataFrame(columns=['column', 'consistent_ratio',
'wrongos', 'total'])
col_dfs = {}
# determine how many times each of the columns occur
for col in static_cols + annual_cols:
if col in annual_cols:
cols_to_consit = entity_id + ['report_date']
if col in static_cols:
cols_to_consit = entity_id
strictness = _manage_strictness(col, eia860_ytd)
col_df = _occurrence_consistency(
entity_id, compiled_df, col, cols_to_consit, strictness=strictness)
# pull the correct values out of the df and merge w/ the plant ids
col_correct_df = (
col_df[col_df[f'{col}_consistent']].
drop_duplicates(subset=(cols_to_consit + [f'{col}_consistent']))
)
# we need this to be an empty df w/ columns bc we are going to use it
if col_correct_df.empty:
col_correct_df = pd.DataFrame(columns=col_df.columns)
if col in static_cols:
clean_df = entity_id_df.merge(
col_correct_df, on=entity_id, how='left')
clean_df = clean_df[entity_id + [col]]
entity_df = entity_df.merge(clean_df, on=entity_id)
if col in annual_cols:
clean_df = annual_id_df.merge(
col_correct_df, on=(entity_id + ['report_date']), how='left')
clean_df = clean_df[entity_id + ['report_date', col]]
annual_df = annual_df.merge(
clean_df, on=(entity_id + ['report_date']))
# get the still dirty records by using the cleaned ids w/null values
# we need the plants that have no 'correct' value so
# we can't just use the col_df records when the consistency is not True
dirty_df = col_df.merge(
clean_df[clean_df[col].isnull()][entity_id])
if col in special_case_cols.keys():
clean_df = special_case_cols[col][0](
dirty_df, clean_df, entity_id_df, entity_id, col,
cols_to_consit, special_case_cols[col][1])
if debug:
col_dfs[col] = col_df
# this next section is used to print and test whether the harvested
# records are consistent enough
total = len(col_df.drop_duplicates(subset=cols_to_consit))
# if the total is 0, the ratio will error, so assign null values.
if total == 0:
ratio = np.NaN
wrongos = np.NaN
logger.debug(f" Zero records found for {col}")
if total > 0:
ratio = (
len(col_df[(col_df[f'{col}_consistent'])].
drop_duplicates(subset=cols_to_consit)) / total
)
wrongos = (1 - ratio) * total
logger.debug(
f" Ratio: {ratio:.3} "
f"Wrongos: {wrongos:.5} "
f"Total: {total} {col}"
)
if ratio < 0.9:
if debug:
logger.error(f'{col} has low consistency: {ratio:.3}.')
else:
raise AssertionError(
f'Harvesting of {col} is too inconsistent at {ratio:.3}.')
# add to a small df to be used in order to print out the ratio of
# consistent records
consistency = consistency.append({'column': col,
'consistent_ratio': ratio,
'wrongos': wrongos,
'total': total}, ignore_index=True)
mcs = consistency['consistent_ratio'].mean()
logger.info(
f"Average consistency of static {entity} values is {mcs:.2%}")
if entity == "plants":
entity_df = _add_additional_epacems_plants(entity_df)
entity_df = _add_timezone(entity_df)
eia_transformed_dfs[f'{entity}_annual_eia'] = annual_df
entities_dfs[f'{entity}_entity_eia'] = entity_df
if debug:
return entities_dfs, eia_transformed_dfs, col_dfs
return (entities_dfs, eia_transformed_dfs)
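# A minimal, self-contained sketch of the consistency test used above: for
# each entity id, a column value is kept only if the most common reported
# value accounts for at least 90% of that entity's records. This illustrates
# the idea only -- it is not PUDL's actual _occurrence_consistency(); the
# 'plant_id_eia' / 'state' columns and values are assumptions made for the demo.
def _example_consistency_ratio():
    import pandas as pd

    records = pd.DataFrame({
        'plant_id_eia': [1, 1, 1, 1, 2, 2],
        'state': ['CO', 'CO', 'CO', 'WY', 'TX', 'TX'],
    })
    counts = (
        records.groupby(['plant_id_eia', 'state'])
        .size()
        .reset_index(name='occurrences')
    )
    totals = counts.groupby('plant_id_eia')['occurrences'].transform('sum')
    counts['ratio'] = counts['occurrences'] / totals
    # plant 1 reports 'CO' in 3 of 4 records (0.75 -> rejected at the 90% bar),
    # plant 2 reports 'TX' in 2 of 2 records (1.0 -> accepted).
    return counts[counts['ratio'] >= 0.9]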
def _boiler_generator_assn(
eia_transformed_dfs,
eia923_years=pc.working_partitions['eia923']['years'],
eia860_years=pc.working_partitions['eia860']['years'],
debug=False
):
"""
Creates a set of more complete boiler generator associations.
Creates a unique unit_id_pudl for each collection of boilers and generators
within a plant that have ever been associated with each other, based on
the boiler generator associations reported in EIA860. Unfortunately, this
information is not complete for years before 2014, as the gas turbine
portion of combined cycle power plants in those earlier years was not
reporting its fuel consumption, or its existence as part of the plants.
For years 2014 and on, EIA860 contains a unit_id_eia value, allowing the
combined cycle plant components to be associated with each other. For many
plants not listed in the reported boiler generator associations, it is
nonetheless possible to associate boilers and generators on a one-to-one
basis, as they use identical strings to describe the units.
In the end, between the reported BGA table, the string matching, and the
unit_id_eia values, it's possible to create a nearly complete mapping of
the generation units, at least for 2014 and later.
Args:
eia_transformed_dfs (dict): a dictionary of post-transform dataframes
representing the EIA database tables.
eia923_years (list-like): a list of the years of EIA 923 data that
should be used to infer the boiler-generator associations. By
default it is all the working years of data.
eia860_years (list-like): a list of the years of EIA 860 data that
should be used to infer the boiler-generator associations. By
default it is all the working years of data.
debug (bool): If True, include columns in the returned dataframe
indicating by what method the individual boiler generator
associations were inferred.
Returns:
eia_transformed_dfs (dict): Returns the same dictionary of dataframes
that was passed in, and adds a new dataframe to it representing
the boiler-generator associations as records containing
plant_id_eia, generator_id, boiler_id, and unit_id_pudl
Raises:
AssertionError: If the boiler-generator association graphs are not
bipartite, meaning generators only connect to boilers, and boilers
only connect to generators.
AssertionError: If all the boilers do not end up with the same unit_id
each year.
AssertionError: If all the generators do not end up with the same
unit_id each year.
"""
# if you're not ingesting both 860 and 923, the bga is not compilable
if not (eia860_years and eia923_years):
return pd.DataFrame()
# compile and scrub all the parts
logger.info("Inferring complete EIA boiler-generator associations.")
bga_eia860 = (
eia_transformed_dfs['boiler_generator_assn_eia860'].copy()
.pipe(_restrict_years, eia923_years, eia860_years)
.astype({
'generator_id': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
"plant_id_eia": int,
})
)
# grab the generation_eia923 table, group annually, generate a new tag
gen_eia923 = eia_transformed_dfs['generation_eia923'].copy()
gen_eia923 = gen_eia923.set_index( | pd.DatetimeIndex(gen_eia923.report_date) | pandas.DatetimeIndex |
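# Hedged sketch of the unit-grouping idea described in the docstring of
# _boiler_generator_assn above: treat each reported boiler-generator
# association as an edge of a graph and give every connected component its
# own unit id. This is only an illustration of the approach -- networkx and
# the tiny hand-made association table are assumptions, not the actual PUDL
# implementation.
def _example_unit_ids_from_bga():
    import networkx as nx
    import pandas as pd

    bga = pd.DataFrame({
        'plant_id_eia': [1, 1, 1],
        'boiler_id': ['B1', 'B1', 'B2'],
        'generator_id': ['G1', 'G2', 'G3'],
    })
    graph = nx.Graph()
    for row in bga.itertuples(index=False):
        graph.add_edge(('boiler', row.plant_id_eia, row.boiler_id),
                       ('generator', row.plant_id_eia, row.generator_id))
    unit_ids = {}
    for unit_id, component in enumerate(nx.connected_components(graph), start=1):
        for node in component:
            unit_ids[node] = unit_id
    # B1, G1 and G2 share unit 1; B2 and G3 form unit 2.
    return unit_ids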
"""
GIS For Electrification (GISEle)
Developed by the Energy Department of Politecnico di Milano
Supporting Code
Group of supporting functions used throughout the GISEle algorithm
"""
import os
import requests
import pandas as pd
import geopandas as gpd
import numpy as np
import json
import shapely.ops
import iso8601
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist
from shapely.geometry import Point, box, LineString, MultiPoint
from shapely.ops import split
from gisele.michele.michele import start
from gisele.data_import import import_pv_data, import_wind_data
from datetime import datetime
def l():
"""Print long separating lines."""
print('-' * 100)
def s():
"""Print short separating lines."""
print("-" * 40)
def nearest(row, df, src_column=None):
"""
Find the nearest point and return the value from the specified column.
:param row: Row of the first dataframe (applied iteratively)
:param df: Second dataframe in which to find the nearest value
:param src_column: Column of the second dataframe whose value will be returned
:return value: Value of the desired src_column of the second dataframe
"""
# Find the geometry that is closest
nearest_p = df['geometry'] == shapely.ops.nearest_points(row['geometry'],
df.unary_union)[1]
# Get the corresponding value from df2 (matching is based on the geometry)
value = df.loc[nearest_p, src_column].values[0]
return value
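# Hedged usage sketch for nearest() above: attach, to every point of one
# GeoDataFrame, a column value taken from the closest point of another.
# The coordinates and the 'name' column are made-up assumptions for the demo.
def _example_nearest():
    towns = gpd.GeoDataFrame({'geometry': [Point(0, 0), Point(5, 5)],
                              'name': ['A', 'B']})
    houses = gpd.GeoDataFrame({'geometry': [Point(1, 1), Point(4, 4)]})
    houses['nearest_town'] = houses.apply(
        nearest, axis=1, df=towns, src_column='name')
    # (1, 1) is closest to town 'A', (4, 4) to town 'B'
    return houses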
def distance_2d(df1, df2, x, y):
"""
Find the 2D distance matrix between two datasets of points.
:param df1: first point dataframe
:param df2: second point dataframe
:param x: column representing the x coordinates (longitude)
:param y: column representing the y coordinates (latitude)
:return value: 2D Distance matrix between df1 and df2
"""
d1_coordinates = {'x': df1[x], 'y': df1[y]}
df1_loc = pd.DataFrame(data=d1_coordinates)
df1_loc.index = df1['ID']
d2_coordinates = {'x': df2[x], 'y': df2[y]}
df2_loc = | pd.DataFrame(data=d2_coordinates) | pandas.DataFrame |
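# Hedged sketch of what distance_2d() above builds once complete: a dense
# matrix of pairwise 2D distances between two point sets, computed here
# directly with scipy's distance_matrix. The coordinates and 'ID' values are
# made-up assumptions for the demo.
def _example_distance_2d():
    df1 = pd.DataFrame({'ID': [1, 2], 'X': [0.0, 3.0], 'Y': [0.0, 4.0]})
    df2 = pd.DataFrame({'ID': [10, 20], 'X': [0.0, 3.0], 'Y': [0.0, 0.0]})
    dist = pd.DataFrame(
        distance_matrix(df1[['X', 'Y']].values, df2[['X', 'Y']].values),
        index=df1['ID'], columns=df2['ID'])
    # dist.loc[2, 10] == 5.0 (3-4-5 triangle)
    return dist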
import turtle
import pandas
CHANCES = 14 # Global Constant
screen = turtle.Screen()
screen.setup(width=1300, height=600)
map_of_nepal = "./map_of_nepal_outline.gif"
screen.addshape(map_of_nepal)
turtle.shape(map_of_nepal) # to display map of nepal onto our screen
def get_missing_zones(map_zones, user_zones):
missed_zones = [zone for zone in map_zones if zone not in user_zones] # Conditional List Comprehension
# Converting missed_zones list to a dataframe
missed_zones_df = | pandas.DataFrame(missed_zones) | pandas.DataFrame |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue( | tm.equalContents(secondCat, self.strIndex) | pandas.util.testing.equalContents |
import OrcFxAPI as orca
from Plotting import Plotting
import pandas as pd
import numpy as np
from IO import IO
import AuxFunctions as aux
class Post:
# Static variables -> used to postprocess batch simulations
row_list = []
batch_results = pd.DataFrame()
period = None
def __init__(self) -> None:
self.results = {
"statics": | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""
"""
import re
import time
import numpy as np
import pandas as pd
from lightgbm import LGBMRegressor, LGBMClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OrdinalEncoder, StandardScaler, OneHotEncoder
from sklearn.utils import column_or_1d
from sklearn.utils.validation import check_is_fitted
from hypernets.tabular import column_selector
from hypernets.utils import logging, infer_task_type, const
try:
import jieba
_jieba_installed = True
except ImportError:
_jieba_installed = False
logger = logging.get_logger(__name__)
def root_mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average', squared=True):
return np.sqrt(
mean_squared_error(y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput, squared=squared))
def subsample(X, y, max_samples, train_samples, task, random_state=9527):
stratify = None
if X.shape[0] > max_samples:
if task != 'regression':
stratify = y
X_train, _, y_train, _ = train_test_split(
X, y, train_size=max_samples, shuffle=True, stratify=stratify
)
if task != 'regression':
stratify = y_train
X_train, X_test, y_train, y_test = train_test_split(
X_train, y_train, train_size=train_samples, shuffle=True, stratify=stratify, random_state=random_state
)
else:
if task != 'regression':
stratify = y
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.5, shuffle=True, stratify=stratify
)
return X_train, X_test, y_train, y_test
class PassThroughEstimator(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X):
return X
def fit_transform(self, X, y=None):
return X
class AsTypeTransformer(BaseEstimator):
def __init__(self, *, dtype):
assert dtype is not None
self.dtype = dtype
super(AsTypeTransformer, self).__init__()
def fit(self, X, y=None):
return self
def transform(self, X):
return X.astype(self.dtype)
def fit_transform(self, X, y=None):
return self.transform(X)
# class SafeLabelEncoder(LabelEncoder):
# def transform(self, y):
# check_is_fitted(self, 'classes_')
# y = column_or_1d(y, warn=True)
#
# unseen = len(self.classes_)
# y = np.array([np.searchsorted(self.classes_, x) if x in self.classes_ else unseen for x in y])
# return y
class SafeLabelEncoder(LabelEncoder):
def transform(self, y):
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
unseen = len(self.classes_)
lookup_table = dict(zip(self.classes_, list(range(0, unseen))))
out = np.full(len(y), unseen)
ind_id = 0
for cell_value in y:
if cell_value in lookup_table:
out[ind_id] = lookup_table[cell_value]
ind_id += 1
return out
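# Hedged usage sketch for SafeLabelEncoder above: categories seen during
# fit() get codes 0..n-1, and anything unseen at transform() time is mapped
# to n instead of raising. The toy values are assumptions for the demo.
def _example_safe_label_encoder():
    le = SafeLabelEncoder()
    le.fit(np.array(['a', 'b', 'c']))
    codes = le.transform(np.array(['a', 'c', 'd']))
    # array([0, 2, 3]) -- 'd' was never seen, so it falls back to len(classes_)
    return codes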
class MultiLabelEncoder(BaseEstimator):
def __init__(self, columns=None):
super(MultiLabelEncoder, self).__init__()
self.columns = columns
self.encoders = {}
def fit(self, X, y=None):
assert len(X.shape) == 2
assert isinstance(X, pd.DataFrame) or self.columns is None
if isinstance(X, pd.DataFrame):
if self.columns is None:
self.columns = X.columns.tolist()
for col in self.columns:
data = X.loc[:, col]
if data.dtype == 'object':
data = data.astype('str')
# print(f'Column "{col}" has been converted to "str" type.')
le = SafeLabelEncoder()
le.fit(data)
self.encoders[col] = le
else:
n_features = X.shape[1]
for n in range(n_features):
data = X[:, n]
le = SafeLabelEncoder()
le.fit(data)
self.encoders[n] = le
return self
def transform(self, X):
assert len(X.shape) == 2
assert isinstance(X, pd.DataFrame) or self.columns is None
if self.columns is not None: # dataframe
for col in self.columns:
data = X.loc[:, col]
if data.dtype == 'object':
data = data.astype('str')
X.loc[:, col] = self.encoders[col].transform(data)
else:
n_features = X.shape[1]
assert n_features == len(self.encoders.items())
for n in range(n_features):
X[:, n] = self.encoders[n].transform(X[:, n])
return X
def fit_transform(self, X, *args):
assert len(X.shape) == 2
assert isinstance(X, pd.DataFrame) or self.columns is None
if isinstance(X, pd.DataFrame):
if self.columns is None:
self.columns = X.columns.tolist()
for col in self.columns:
data = X.loc[:, col]
if data.dtype == 'object':
data = data.astype('str')
# print(f'Column "{col}" has been converted to "str" type.')
le = SafeLabelEncoder()
X.loc[:, col] = le.fit_transform(data)
self.encoders[col] = le
else:
n_features = X.shape[1]
for n in range(n_features):
data = X[:, n]
le = SafeLabelEncoder()
X[:, n] = le.fit_transform(data)
self.encoders[n] = le
return X
class SafeOrdinalEncoder(OrdinalEncoder):
__doc__ = 'Adapted from sklearn OrdinalEncoder\n' + OrdinalEncoder.__doc__
def transform(self, X, y=None):
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("Unexpected type {}".format(type(X)))
def make_encoder(categories):
unseen = len(categories)
m = dict(zip(categories, range(unseen)))
vf = np.vectorize(lambda x: m[x] if x in m.keys() else unseen)
return vf
values = X if isinstance(X, np.ndarray) else X.values
encoders_ = [make_encoder(cat) for cat in self.categories_]
result = [encoders_[i](values[:, i]) for i in range(values.shape[1])]
if isinstance(X, pd.DataFrame):
assert len(result) == len(X.columns)
data = {c: result[i] for i, c in enumerate(X.columns)}
result = pd.DataFrame(data, dtype=self.dtype)
else:
result = np.stack(result, axis=1)
if self.dtype != result.dtype:
result = result.astype(self.dtype)
return result
def inverse_transform(self, X):
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("Unexpected type {}".format(type(X)))
def make_decoder(categories, dtype):
if dtype in (np.float32, np.float64, np.float):
default_value = np.nan
elif dtype in (np.int32, np.int64, np.int, np.uint32, np.uint64, np.uint):
default_value = -1
else:
default_value = None
dtype = np.object
unseen = len(categories)
vf = np.vectorize(lambda x: categories[x] if unseen > x >= 0 else default_value,
otypes=[dtype])
return vf
values = X if isinstance(X, np.ndarray) else X.values
decoders_ = [make_decoder(cat, cat.dtype) for i, cat in enumerate(self.categories_)]
result = [decoders_[i](values[:, i]) for i in range(values.shape[1])]
if isinstance(X, pd.DataFrame):
assert len(result) == len(X.columns)
data = {c: result[i] for i, c in enumerate(X.columns)}
result = pd.DataFrame(data)
else:
result = np.stack(result, axis=1)
return result
class SafeOneHotEncoder(OneHotEncoder):
def get_feature_names(self, input_features=None):
"""
Override this method to remove non-alphanumeric chars from feature names
"""
check_is_fitted(self)
cats = self.categories_
if input_features is None:
input_features = ['x%d' % i for i in range(len(cats))]
elif len(input_features) != len(self.categories_):
raise ValueError(
"input_features should have length equal to number of "
"features ({}), got {}".format(len(self.categories_),
len(input_features)))
feature_names = []
for i in range(len(cats)):
names = [input_features[i] + '_' + str(idx) + '_' + re.sub('[^A-Za-z0-9_]+', '_', str(t))
for idx, t in enumerate(cats[i])]
if self.drop_idx_ is not None and self.drop_idx_[i] is not None:
names.pop(self.drop_idx_[i])
feature_names.extend(names)
return np.array(feature_names, dtype=object)
class LogStandardScaler(BaseEstimator):
def __init__(self, copy=True, with_mean=True, with_std=True):
super(LogStandardScaler, self).__init__()
self.scaler = StandardScaler(copy=copy, with_mean=with_mean, with_std=with_std)
self.min_values = None
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
self.X_min_values = np.min(X)
self.scaler.fit(np.log(X - self.X_min_values + 1))
return self
def transform(self, X):
X = np.log(np.clip(X - self.X_min_values + 1, a_min=1, a_max=None))
X = self.scaler.transform(X)
return X
class SkewnessKurtosisTransformer(BaseEstimator):
def __init__(self, transform_fn=None, skew_threshold=0.5, kurtosis_threshold=0.5):
self.columns_ = []
self.skewness_threshold = skew_threshold
self.kurtosis_threshold = kurtosis_threshold
if transform_fn is None:
transform_fn = np.log
self.transform_fn = transform_fn
def fit(self, X, y=None):
assert len(X.shape) == 2
self.columns_ = column_selector.column_skewness_kurtosis(X, skew_threshold=self.skewness_threshold,
kurtosis_threshold=self.kurtosis_threshold)
logger.info(f'SkewnessKurtosisTransformer - selected columns:{self.columns_}')
return self
def transform(self, X):
assert len(X.shape) == 2
if len(self.columns_) > 0:
try:
X[self.columns_] = self.transform_fn(X[self.columns_])
except Exception as e:
logger.error(e)
return X
class FeatureSelectionTransformer(BaseEstimator):
def __init__(self, task=None, max_train_samples=10000, max_test_samples=10000, max_cols=10000,
ratio_select_cols=0.1,
n_max_cols=100, n_min_cols=10, reserved_cols=None):
super(FeatureSelectionTransformer, self).__init__()
self.task = task
if max_cols <= 0:
max_cols = 10000
if max_train_samples <= 0:
max_train_samples = 10000
if max_test_samples <= 0:
max_test_samples = 10000
self.max_train_samples = max_train_samples
self.max_test_samples = max_test_samples
self.max_cols = max_cols
self.ratio_select_cols = ratio_select_cols
self.n_max_cols = n_max_cols
self.n_min_cols = n_min_cols
self.reserved_cols = reserved_cols
self.scores_ = {}
self.columns_ = []
def get_categorical_features(self, X):
cat_cols = column_selector.column_object_category_bool(X)
int_cols = column_selector.column_int(X)
for c in int_cols:
if X[c].min() >= 0 and X[c].max() < np.iinfo(np.int32).max:
cat_cols.append(c)
return cat_cols
def feature_score(self, F_train, y_train, F_test, y_test):
if self.task is None:
self.task, _ = infer_task_type(y_train)
if self.task == 'regression':
model = LGBMRegressor()
eval_metric = root_mean_squared_error
else:
model = LGBMClassifier()
eval_metric = log_loss
cat_cols = self.get_categorical_features(F_train)
model.fit(F_train, y_train,
# eval_set=(F_test, y_test),
# early_stopping_rounds=20,
# verbose=0,
# categorical_feature=cat_cols,
# eval_metric=eval_metric,
)
if self.task == 'regression':
y_pred = model.predict(F_test)
else:
y_pred = model.predict_proba(F_test)[:, 1]
score = eval_metric(y_test, y_pred)
return score
def fit(self, X, y):
start_time = time.time()
if self.task is None:
self.task, _ = infer_task_type(y)
columns = X.columns.to_list()
logger.info(f'all columns: {columns}')
if self.reserved_cols is not None:
self.reserved_cols = list(set(self.reserved_cols).intersection(columns))
logger.info(f'exclude reserved columns: {self.reserved_cols}')
columns = list(set(columns) - set(self.reserved_cols))
if len(columns) > self.max_cols:
columns = np.random.choice(columns, self.max_cols, replace=False)
if len(columns) <= 0:
logger.warn('no columns to score')
self.columns_ = self.reserved_cols
self.scores_ = {}
return self
X_score = X[columns]
X_train, X_test, y_train, y_test = subsample(X_score, y,
max_samples=self.max_test_samples + self.max_train_samples,
train_samples=self.max_train_samples,
task=self.task)
if self.task != 'regression' and y_train.dtype != 'int':
le = LabelEncoder()
y_train = le.fit_transform(y_train)
y_test = le.transform(y_test)
cat_cols = column_selector.column_object_category_bool(X_train)
if len(cat_cols) > 0:
logger.info('ordinal encoding...')
X_train['__datacanvas__source__'] = 'train'
X_test['__datacanvas__source__'] = 'test'
X_all = pd.concat([X_train, X_test], axis=0)
oe = OrdinalEncoder()
X_all[cat_cols] = oe.fit_transform(X_all[cat_cols]).astype('int')
X_train = X_all[X_all['__datacanvas__source__'] == 'train']
X_test = X_all[X_all['__datacanvas__source__'] == 'test']
X_train.pop('__datacanvas__source__')
X_test.pop('__datacanvas__source__')
self.scores_ = {}
for c in columns:
F_train = X_train[[c]]
F_test = X_test[[c]]
self.scores_[c] = self.feature_score(F_train, y_train, F_test, y_test)
logger.info(f'Feature score: {c}={self.scores_[c]}')
sorted_scores = sorted([[col, score] for col, score in self.scores_.items()], key=lambda x: x[1])
logger.info(f'feature scores:{sorted_scores}')
topn = np.min([np.max([int(len(columns) * self.ratio_select_cols), np.min([len(columns), self.n_min_cols])]),
self.n_max_cols])
if self.reserved_cols is not None:
self.columns_ = self.reserved_cols
else:
self.columns_ = []
self.columns_ += [s[0] for s in sorted_scores[:topn]]
logger.info(f'selected columns:{self.columns_}')
logger.info(f'taken {time.time() - start_time}s')
del X_score, X_train, X_test, y_train, y_test
return self
def transform(self, X):
return X[self.columns_]
class FloatOutputImputer(SimpleImputer):
def transform(self, X):
return super().transform(X).astype(np.float64)
class LgbmLeavesEncoder(BaseEstimator, TransformerMixin):
def __init__(self, cat_vars, cont_vars, task, **params):
super(LgbmLeavesEncoder, self).__init__()
self.lgbm = None
self.cat_vars = cat_vars
self.cont_vars = cont_vars
self.new_columns = []
self.task = task
self.lgbm_params = params
def fit(self, X, y):
from lightgbm import LGBMClassifier, LGBMRegressor
X[self.cont_vars] = X[self.cont_vars].astype('float')
X[self.cat_vars] = X[self.cat_vars].astype('int')
logger.info(f'LightGBM task:{self.task}')
if self.task == const.TASK_MULTICLASS: # multiclass label
if len(y.shape) > 1 and y.shape[1] > 1:
num_class = y.shape[-1]
if self.lgbm_params is None:
self.lgbm_params = {}
y = y.argmax(axis=-1)
else:
if hasattr(y, 'unique'):
num_class = len(set(y.unique()))
else:
num_class = len(set(y))
self.lgbm_params['num_class'] = num_class + 1
self.lgbm_params['n_estimators'] = int(100 / num_class) + 1
if self.task == const.TASK_REGRESSION:
self.lgbm = LGBMRegressor(**self.lgbm_params)
else:
self.lgbm = LGBMClassifier(**self.lgbm_params)
self.lgbm.fit(X, y)
return self
def transform(self, X):
X[self.cont_vars] = X[self.cont_vars].astype('float')
X[self.cat_vars] = X[self.cat_vars].astype('int')
leaves = self.lgbm.predict(X, pred_leaf=True, num_iteration=self.lgbm.best_iteration_)
new_columns = [f'lgbm_leaf_{i}' for i in range(leaves.shape[1])]
df_leaves = pd.DataFrame(leaves, columns=new_columns, index=X.index)
result = pd.concat([X, df_leaves], axis=1)
self.new_columns = new_columns
return result
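# Hedged sketch of the idea behind LgbmLeavesEncoder above: LightGBM can
# return, for every sample, the index of the leaf it lands in within each
# tree (pred_leaf=True); those indices become extra categorical features.
# The random data and model size here are assumptions made for the demo.
def _example_pred_leaf_features():
    from lightgbm import LGBMClassifier

    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(200, 4), columns=[f'x{i}' for i in range(4)])
    y = (X['x0'] + X['x1'] > 1).astype(int)
    model = LGBMClassifier(n_estimators=10)
    model.fit(X, y)
    leaves = model.predict(X, pred_leaf=True)  # one leaf index per tree
    leaf_df = pd.DataFrame(
        leaves,
        columns=[f'lgbm_leaf_{i}' for i in range(leaves.shape[1])],
        index=X.index)
    return pd.concat([X, leaf_df], axis=1)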
class CategorizeEncoder(BaseEstimator, TransformerMixin):
def __init__(self, columns=None, remain_numeric=True):
super(CategorizeEncoder, self).__init__()
self.columns = columns
self.remain_numeric = remain_numeric
# fitted
self.new_columns = []
def fit(self, X, y=None):
if self.columns is None:
self.columns = X.columns.tolist()
new_columns = []
if self.remain_numeric:
for col in self.columns:
target_col = col + const.COLUMNNAME_POSTFIX_CATEGORIZE
new_columns.append((target_col, 'str', X[col].nunique()))
self.new_columns = new_columns
return self
def transform(self, X):
for col in self.columns:
if self.remain_numeric:
target_col = col + const.COLUMNNAME_POSTFIX_CATEGORIZE
else:
target_col = col
X[target_col] = X[col].astype('str')
return X
class MultiKBinsDiscretizer(BaseEstimator, TransformerMixin):
def __init__(self, columns=None, bins=None, strategy='quantile'):
super(MultiKBinsDiscretizer, self).__init__()
logger.info(f'{len(columns)} variables to discretize.')
self.columns = columns
self.bins = bins
self.strategy = strategy
self.new_columns = []
self.encoders = {}
def fit(self, X, y=None):
self.new_columns = []
if self.columns is None:
self.columns = X.columns.tolist()
for col in self.columns:
new_name = col + const.COLUMNNAME_POSTFIX_DISCRETE
n_unique = X.loc[:, col].nunique()
n_null = X.loc[:, col].isnull().sum()
c_bins = self.bins
if c_bins is None or c_bins <= 0:
c_bins = round(n_unique ** 0.25) + 1
encoder = KBinsDiscretizer(n_bins=c_bins, encode='ordinal', strategy=self.strategy)
self.new_columns.append((col, new_name, encoder.n_bins))
encoder.fit(X[[col]])
self.encoders[col] = encoder
return self
def transform(self, X):
for col in self.columns:
new_name = col + const.COLUMNNAME_POSTFIX_DISCRETE
encoder = self.encoders[col]
nc = encoder.transform(X[[col]]).astype(const.DATATYPE_LABEL).reshape(-1)
X[new_name] = nc
return X
class DataFrameWrapper(BaseEstimator, TransformerMixin):
def __init__(self, transform, columns=None):
super(DataFrameWrapper, self).__init__()
self.transformer = transform
self.columns = columns
def fit(self, X, y=None):
if self.columns is None:
self.columns = X.columns.tolist()
self.transformer.fit(X)
return self
def transform(self, X):
df = pd.DataFrame(self.transformer.transform(X))
df.columns = self.columns
return df
class GaussRankScaler(BaseEstimator):
def __init__(self):
super(GaussRankScaler, self).__init__()
self.epsilon = 0.001
self.lower = -1 + self.epsilon
self.upper = 1 - self.epsilon
self.range = self.upper - self.lower
self.divider = None
def fit_transform(self, X, y=None):
from scipy.special import erfinv
i = np.argsort(X, axis=0)
j = np.argsort(i, axis=0)
assert (j.min() == 0).all()
assert (j.max() == len(j) - 1).all()
j_range = len(j) - 1
self.divider = j_range / self.range
transformed = j / self.divider
transformed = transformed - self.upper
transformed = erfinv(transformed)
return transformed
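# Hedged usage sketch for GaussRankScaler above: values are replaced by their
# ranks, rescaled into (-1, 1) and passed through the inverse error function,
# so a heavily skewed input comes out roughly Gaussian. The exponential
# sample below is an assumption for the demo.
def _example_gauss_rank():
    rng = np.random.RandomState(0)
    X = rng.exponential(size=(1000, 1))          # strongly right-skewed
    scaled = GaussRankScaler().fit_transform(X)  # approximately normal
    return scaled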
class VarLenFeatureEncoder:
def __init__(self, sep='|'):
super(VarLenFeatureEncoder, self).__init__()
self.sep = sep
self.encoder: SafeLabelEncoder = None
self._max_element_length = 0
def fit(self, X: pd.Series):
self._max_element_length = 0 # reset
if not isinstance(X, pd.Series):
X = pd.Series(X)
key_set = set()
# flat map
for keys in X.map(lambda _: _.split(self.sep)):
if len(keys) > self._max_element_length:
self._max_element_length = len(keys)
key_set.update(keys)
key_set = list(key_set)
key_set.sort()
lb = SafeLabelEncoder() # fix unseen values
lb.fit(np.array(key_set))
self.encoder = lb
return self
def transform(self, X: pd.Series):
if self.encoder is None:
raise RuntimeError("Not fitted yet.")
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
if not isinstance(X, pd.Series):
X = pd.Series(X)
# Notice: input value 0 is a special "padding" value, so we do not use 0 to encode valid features for sequence input
data = X.map(lambda _: (self.encoder.transform(_.split(self.sep)) + 1).tolist())
transformed = pad_sequences(data, maxlen=self._max_element_length, padding='post',
truncating='post').tolist() # cut last elements
return transformed
@property
def n_classes(self):
return len(self.encoder.classes_)
@property
def max_element_length(self):
return self._max_element_length
class MultiVarLenFeatureEncoder(BaseEstimator, TransformerMixin):
def __init__(self, features):
super(MultiVarLenFeatureEncoder, self).__init__()
self.features = features
# fitted
self.encoders_ = {} # feature name -> VarLenFeatureEncoder
self.max_length_ = {} # feature name -> max length
def fit(self, X, y=None):
encoders = {feature[0]: VarLenFeatureEncoder(feature[1]) for feature in self.features}
max_length = {}
for k, v in encoders.items():
v.fit(X[k])
max_length[k] = v.max_element_length
self.encoders_ = encoders
self.max_length_ = max_length
return self
def transform(self, X):
for k, v in self.encoders_.items():
X[k] = v.transform(X[k])
return X
class LocalizedTfidfVectorizer(TfidfVectorizer):
def decode(self, doc):
doc = super().decode(doc)
if _jieba_installed and self._exist_chinese(doc):
doc = ' '.join(jieba.cut(doc))
return doc
@staticmethod
def _exist_chinese(s):
if isinstance(s, str):
for ch in s:
if u'\u4e00' <= ch <= u'\u9fff':
return True
return False
class TfidfEncoder(BaseEstimator, TransformerMixin):
def __init__(self, columns=None, flatten=False, **kwargs):
assert columns is None or isinstance(columns, (str, list, tuple))
if isinstance(columns, str):
columns = [columns]
super(TfidfEncoder, self).__init__()
self.columns = columns
self.flatten = flatten
self.encoder_kwargs = kwargs.copy()
# fitted
self.encoders_ = None
def fit(self, X, y=None):
assert isinstance(X, (np.ndarray, pd.DataFrame)) and len(X.shape) == 2
if self.columns is None:
if isinstance(X, pd.DataFrame):
columns = column_selector.column_object(X)
else:
columns = range(X.shape[1])
else:
columns = self.columns
encoders = {}
for c in columns:
encoder = LocalizedTfidfVectorizer(**self.encoder_kwargs)
Xc = X[c] if isinstance(X, pd.DataFrame) else X[:, c]
encoders[c] = encoder.fit(Xc, y)
self.encoders_ = encoders
return self
def transform(self, X, y=None):
assert self.encoders_ is not None
assert isinstance(X, (np.ndarray, pd.DataFrame)) and len(X.shape) == 2
if isinstance(X, pd.DataFrame):
X = X.copy()
if self.flatten:
dfs = [X]
for c, encoder in self.encoders_.items():
t = encoder.transform(X[c]).toarray()
dfs.append(pd.DataFrame(t, index=X.index, columns=[f'{c}_tfidf_{i}' for i in range(t.shape[1])]))
X.pop(c)
X = pd.concat(dfs, axis=1)
else:
for c, encoder in self.encoders_.items():
t = encoder.transform(X[c]).toarray()
X[c] = t.tolist()
else:
r = []
tolist = None if self.flatten else np.vectorize(self._to_array, otypes=[np.object], signature='(m)->()')
for i in range(X.shape[1]):
Xi = X[:, i]
if i in self.encoders_.keys():
encoder = self.encoders_[i]
t = encoder.transform(Xi).toarray()
if tolist is not None:
t = tolist(t).reshape((-1, 1))
r.append(t)
else:
r.append(Xi)
X = np.hstack(r)
return X
@staticmethod
def _to_list(x):
return x.tolist()
@staticmethod
def _to_array(x):
return x
class DatetimeEncoder(BaseEstimator, TransformerMixin):
all_items = ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'weekday', 'dayofyear',
'timestamp']
all_items = {k: k for k in all_items}
all_items['timestamp'] = lambda t: time.mktime(t.timetuple())
default_include = ['month', 'day', 'hour', 'minute',
'week', 'weekday', 'dayofyear']
def __init__(self, columns=None, include=None, exclude=None, extra=None, drop_constants=True):
assert columns is None or isinstance(columns, (str, list, tuple))
assert include is None or isinstance(include, (str, list, tuple))
assert exclude is None or isinstance(exclude, (str, list, tuple))
assert extra is None or isinstance(extra, (tuple, list))
if extra is not None:
assert all(len(x) == 2 and isinstance(x[0], str)
and (x[1] is None or isinstance(x[1], str) or callable(x[1]))
for x in extra)
if isinstance(columns, str):
columns = [columns]
if include is None:
to_extract = self.default_include
elif isinstance(include, str):
to_extract = [include]
else:
to_extract = include
if isinstance(exclude, str):
exclude = [exclude]
if exclude is not None:
to_extract = [i for i in to_extract if i not in exclude]
assert all(i in self.all_items for i in to_extract)
to_extract = {k: self.all_items[k] for k in to_extract}
if isinstance(extra, (tuple, list)):
for k, c in extra:
to_extract[k] = c
super(DatetimeEncoder, self).__init__()
self.columns = columns
self.include = include
self.exclude = exclude
self.extra = extra
self.drop_constants = drop_constants
self.extract_ = to_extract
def fit(self, X, y=None):
if self.columns is None:
if not isinstance(X, pd.DataFrame):
X = | pd.DataFrame(X) | pandas.DataFrame |
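# Hedged sketch of the kind of feature extraction DatetimeEncoder above is
# built around: pulling calendar components out of a datetime column with
# pandas' .dt accessor. The column name and date range are assumptions for
# the demo; the real class selects components via its `include`/`exclude` args.
def _example_datetime_components():
    import pandas as pd

    df = pd.DataFrame({'ts': pd.date_range('2021-01-01', periods=3, freq='D')})
    out = pd.DataFrame({
        'ts_month': df['ts'].dt.month,
        'ts_day': df['ts'].dt.day,
        'ts_weekday': df['ts'].dt.weekday,
        'ts_dayofyear': df['ts'].dt.dayofyear,
    })
    return out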
import os
import argparse
import numpy as np
import pickle as pk
import seaborn as sn
import pandas as pd
import json
import matplotlib.pyplot as plt
from matplotlib.collections import EventCollection
from scipy.interpolate import make_interp_spline, BSpline
def dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--train', required=True, dest='train', type=dir_path, help="training session path")
args = parser.parse_args()
session_path = args.train
# with open(f'{session_path}/training_acc.pickle', 'rb') as input_file:
# training_acc = pk.load(input_file)
# with open(f'{session_path}/training_loss.pickle', 'rb') as input_file:
# training_loss = pk.load(input_file)
with open(f'{session_path}/training_data.pickle', 'rb') as input_file:
train_data = pk.load(input_file)
with open(f'{session_path}/confusionmatrix_float.pickle', 'rb') as input_file:
cmatrix_float = pk.load(input_file)
with open(f'{session_path}/confusionmatrix_fixed.pickle', 'rb') as input_file:
cmatrix_fixed = pk.load(input_file)
json_file = open(f'{session_path}/training_summary.json', 'r')
json_data = json.load(json_file)
dataset_name = json_data['dataset_name']
fully_2_outdim = json_data['fully_2_outdim']
train_dic = {
'train_loss' : 0,
'valid_loss' : 1,
'train_acc' : 2,
'valid_acc' : 3,
'learing_rate' : 4
}
cnt = 0
cnt_t = 0
matrix_float = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
for matrix in cmatrix_float:
matrix_float += matrix
for i in range(fully_2_outdim):
cnt += matrix[i][i]
cnt_t += matrix.sum(axis=0)[i]
cnt_q = 0
cnt_t_q = 0
matrix_fixed = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
for matrix in cmatrix_fixed:
matrix_fixed += matrix
for i in range(fully_2_outdim):
cnt_q += matrix[i][i]
cnt_t_q += matrix.sum(axis=0)[i]
y1_data = train_data[train_dic['train_acc']]
y3_data = train_data[train_dic['valid_acc']]
y2_data = train_data[train_dic['train_loss']]
y4_data = train_data[train_dic['valid_loss']]
y5_data = train_data[train_dic['learing_rate']]
x = np.array(range(1,len(y1_data)+1))
fig = plt.figure()
xnew = np.linspace(x.min(), x.max(), 300)
spl1 = make_interp_spline(x, y1_data, k=3) # type: BSpline
spl2 = make_interp_spline(x, y2_data, k=3) # type: BSpline
spl3 = make_interp_spline(x, y3_data, k=3) # type: BSpline
spl4 = make_interp_spline(x, y4_data, k=3) # type: BSpline
spl5 = make_interp_spline(x, y5_data, k=3) # type: BSpline
# fig.suptitle(f'Training path: {session_path}')
graph1_smooth = fig.add_subplot(1, 2, 1)
graph1_smooth.set_title('Accuracy per epoch')
# plt.ylabel("Threat score")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
graph1_smooth = spl1(xnew)
graph1_smooth_2 = spl3(xnew)
p1 = plt.plot(xnew,graph1_smooth)
p2 = plt.plot(xnew,graph1_smooth_2)
# plt.text(xnew[-1], np.amin((graph1_smooth,graph1_smooth_2)),f'Training\n', color='tab:blue', va='bottom', ha='right') #, weight="bold"
# plt.text(xnew[-1], np.amin((graph1_smooth,graph1_smooth_2)),f'Validation', color='tab:orange', va='bottom', ha='right')
plt.legend((p1[0], p2[0]), ('Training', 'Validation'), loc='lower right')
graph2_smooth = fig.add_subplot(1, 2, 2)
graph2_smooth.set_title('Loss & learning rate per epoch')
plt.ylabel("Loss")
plt.xlabel("Epoch")
graph3_smooth = graph2_smooth.twinx()
plt.ylabel("Learning rate", color='tab:green')
graph2_smooth = spl2(xnew)
graph2_smooth_2 = spl4(xnew)
graph3_smooth = spl5(xnew)
p1 = plt.plot(xnew,graph2_smooth)
p2 = plt.plot(xnew,graph2_smooth_2)
p3 = plt.plot(xnew,graph3_smooth)
# plt.text(xnew[-1], np.amax((graph2_smooth,graph2_smooth_2)),f'Training', color='tab:blue', va='top', ha='right') #, weight="bold"
# plt.text(xnew[-1], np.amax((graph2_smooth,graph2_smooth_2)),f'\nValidation', color='tab:orange', va='top', ha='right')
plt.legend((p1[0], p2[0], p3[0]), ('Training', 'Validation', 'Learning rate'), loc='upper right')
fig.tight_layout()
plt.show()
exit()
cm = fig.add_subplot(2, 2, 3)
cm.set_title(f'Floating point model confusion matrix\nAccuracy: {np.sum(np.diag(matrix_float))/np.sum(matrix_float):.5f}')
# cmap = sn.cubehelix_palette(as_cmap=True, light=1)
cmap = sn.cubehelix_palette(gamma= 8, start=1.4, rot=.55, dark=0.8, light=1, as_cmap=True)
# cmap = sn.cubehelix_palette(gamma= 16, start=0.15, rot=.15, dark=0.9, light=1, as_cmap=True)
df_cm = pd.DataFrame(matrix_float, index = [i for i in dataset_name], columns = [i for i in dataset_name])
# sn.load('month', 'year', 'passengers')
res = sn.heatmap(df_cm, annot=True, fmt='g', cmap = cmap) # vmax=2000.0
for _, spine in res.spines.items():
spine.set_visible(True)
plt.ylabel("Predicted label")
plt.xlabel("True label")
cm = fig.add_subplot(2, 2, 4)
cm.set_title(f'Fixed point model confusion matrix\nAccuracy: {np.sum(np.diag(matrix_fixed))/np.sum(matrix_fixed):.5f}')
df_cm = | pd.DataFrame(matrix_fixed, index = [i for i in dataset_name], columns = [i for i in dataset_name]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import polling2
import requests
import json
from web3 import Web3
import pandas as pd
from decouple import config
from datetime import datetime
import logging
from collections import defaultdict
import time
from sqlalchemy import create_engine, desc
from sqlalchemy.orm import sessionmaker
from models import EdenBlock, Epoch, Base, Distribution, DistributionBalance
from apscheduler.schedulers.background import BackgroundScheduler
INFURA_ENDPOINT = config('INFURA_ENDPOINT')
PSQL_ENDPOINT = config('PSQL_ENDPOINT')
engine = create_engine(PSQL_ENDPOINT)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query_dict = {
'block': 'block.graphql',
'distribution': 'distribution.graphql',
'block_lookup': 'block_lookup.graphql',
'epoch_latest': 'epoch_latest.graphql',
'epoch': 'epoch.graphql'
}
eden_governance_api = 'https://api.thegraph.com/subgraphs/name/eden-network/governance'
eden_distribution_api = 'https://api.thegraph.com/subgraphs/name/eden-network/distribution'
eden_network_api = 'https://api.thegraph.com/subgraphs/name/eden-network/network'
def query_to_dict(rset):
result = defaultdict(list)
for obj in rset:
instance = inspect(obj)
for key, x in instance.attrs.items():
result[key].append(x.value)
return result
def get_web3_provider():
infura_endpoint = INFURA_ENDPOINT
my_provider = Web3.HTTPProvider(infura_endpoint)
w3 = Web3(my_provider)
return w3
def get_latest_eth_block():
eden_db_last_block = get_latest_eden_block_db()
w3 = get_web3_provider()
latest_eth_block = w3.eth.get_block('latest')['number']
if latest_eth_block > eden_db_last_block:
return latest_eth_block
else:
return None
def get_latest_eden_block_db():
eden_db_last_block = session.query(EdenBlock).order_by(desc(EdenBlock.block_number)).limit(1).all()
if eden_db_last_block != []:
eden_db_last_block = eden_db_last_block[0].block_number
else:
eden_db_last_block = 0
return eden_db_last_block
def clean_epoch_entry(epoch_string):
epoch_number = int(epoch_string.split('+')[1].replace('epoch', ''))
return int(epoch_number)
def get_latest_distribution_number():
eden_db_last_number_query = session.query(Distribution).order_by(desc(Distribution.distribution_number)).limit(1).all()
if eden_db_last_number_query != []:
eden_last_number = eden_db_last_number_query[0].distribution_number
return eden_last_number
else:
return 0
def ipfs_link_cleanup(raw_uri):
final_ipfs_link = "https://ipfs.io/ipfs/" + raw_uri.split('//')[1]
return final_ipfs_link
def graph_query_call(api, query, variables=None):
request = requests.post(api, json={'query': query, 'variables': variables})
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed. Return code is {}. {}'.format(request.status_code, query))
def fetch_query(query):
query_file = query_dict.get(query)
with open(query_file, 'r') as file:
data = file.read()
return data
def get_epoch_number(block_number):
epoch_number_query = session.query(Epoch).filter(block_number >= Epoch.start_block_number, block_number <= Epoch.end_block_number).limit(1).all()
if epoch_number_query != []:
epoch_number = epoch_number_query[0].epoch_number
return epoch_number
else:
latest_epoch = get_latest_epoch()
return latest_epoch
def get_latest_epoch():
query = fetch_query('epoch_latest')
latest_epoch_result = graph_query_call(eden_governance_api, query)
latest_epoch_id = latest_epoch_result['data']['epoches'][0]['id']
latest_epoch_number = clean_epoch_entry(latest_epoch_id)
return latest_epoch_number
def get_block_number_from_id(block_id):
query = fetch_query('block_lookup')
variables = {'block_id': block_id}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_block_number = int(block_result['data']['block']['number'])
return eden_block_number
def eden_block_call():
last_block = 0
last_block_current = get_latest_eth_block()
eden_blocks_df = pd.DataFrame()
while True:
query = fetch_query('block')
variables = {'number_gt': last_block}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_blocks_df_temp = pd.DataFrame.from_dict(block_result['data']['blocks'])
eden_blocks_df = eden_blocks_df.append(eden_blocks_df_temp)
last_block = int(eden_blocks_df.iloc[-1]['number'])
if last_block >= last_block_current:
break
eden_blocks_df = eden_blocks_df.drop_duplicates()
logging.info('Eden Blocks Pulled To DataFrame')
logging.info('Adding Eden Blocks To Database Now')
eden_last_block_db = get_latest_eden_block_db()
eden_blocks_df = eden_blocks_df[pd.to_numeric(eden_blocks_df['number']) >= eden_last_block_db]
for index, row in eden_blocks_df.iterrows():
block_id_query = session.query(EdenBlock).filter(EdenBlock.id==row['id']).limit(1).all() or None
if block_id_query is None:
epoch_number = get_epoch_number(row['number'])
eden_block_entry = EdenBlock(
id = row['id'],
author = row['author'],
difficulty = row['difficulty'],
gas_limit = row['gasLimit'],
gas_used = row['gasUsed'],
block_hash = row['hash'],
block_number = row['number'],
parent_hash = row['parentHash'],
uncle_hash = row['unclesHash'],
size = row['size'],
state_root = row['stateRoot'],
timestamp = datetime.fromtimestamp(int(row['timestamp'])),
total_difficulty = row['totalDifficulty'],
transactions_root = row['transactionsRoot'],
receipts_root = row['receiptsRoot'],
epoch_number = epoch_number
)
session.add(eden_block_entry)
session.commit()
logging.info('Eden Blocks Added To Database Now')
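# Pull epochs from the governance subgraph and store any finalized epochs that are not yet in the database.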
def eden_epoch_call():
eden_epochs_df = pd.DataFrame()
query = fetch_query('epoch')
epoch_result = graph_query_call(eden_governance_api, query)
eden_epochs_df = pd.DataFrame.from_dict(epoch_result['data']['epoches'])
logging.info('Eden Epochs Pulled To DataFrame')
logging.info('Adding Eden Epochs To Database Now')
for index, row in eden_epochs_df.iterrows():
epoch_id_query = session.query(Epoch).filter(Epoch.id==row['id']).limit(1).all() or None
if epoch_id_query is None and row['finalized'] == True:
epoch = clean_epoch_entry(row['id'])
start_block_number = get_block_number_from_id(row['startBlock']['id'])
end_block_number = get_block_number_from_id(row['endBlock']['id'])
epoch_entry = Epoch(
id = row['id'],
finalized = row['finalized'],
epoch_number = epoch,
start_block = row['startBlock']['id'],
start_block_number = start_block_number,
end_block_number = end_block_number,
end_block = row['endBlock']['id'],
producer_blocks = row['producerBlocks'],
all_blocks = row['allBlocks'],
producer_blocks_ratio = row['producerBlocksRatio']
)
session.add(epoch_entry)
session.commit()
logging.info('Epochs Added To Database Now')
def eden_distribution_call():
eden_distribution = pd.DataFrame()
session = DBSession()
distribution_number = get_latest_distribution_number()
if distribution_number is None:
distribution_number = 0
query = fetch_query('distribution')
variables = {'number_gt': distribution_number}
distribution_result = graph_query_call(eden_distribution_api, query, variables)
distribution_df = pd.DataFrame.from_dict(distribution_result['data']['distributions'])
# -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
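# Join the factor and target frames with the risk-model exposures and return the aligned arrays used for model fitting.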
def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(total_risk_factors).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
if neutralized_risk:
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_y = target_df.copy()
risk_exp = train_x[neutralized_risk].values.astype(float)
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
else:
risk_exp = None
train_x = factor_df.copy()
train_y = target_df.copy()
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
codes = train_x['code'].values
date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
dates = np.unique(date_label)
return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes
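# Assemble factor, forward-return (or custom fit target), industry and benchmark data over a rebalance schedule.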
def prepare_data(engine: SqlEngine,
factors: Union[Transformer, Iterable[object]],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None):
if warm_start > 0:
p = Period(frequency)
p = Period(length=-warm_start * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')
dates = makeSchedule(start_date,
end_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Forward)
dates = [d.strftime('%Y-%m-%d') for d in dates]
horizon = map_freq(frequency)
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
factor_df = engine.fetch_factor_range(universe,
factors=transformer,
dates=dates).sort_values(['trade_date', 'code'])
alpha_logger.info("factor data loading finished")
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
alpha_logger.info("fit target data loading finished")
industry_df = engine.fetch_industry_range(universe, dates=dates)
alpha_logger.info("industry data loading finished")
benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
alpha_logger.info("benchmark data loading finished")
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
df = pd.merge(df, industry_df, on=['trade_date', 'code'])
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tests.core import TestBase, ExecutorForTest
class Test(TestBase):
def setUp(self):
super().setUp()
self.executor = ExecutorForTest()
def testSetIndex(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df3, concat=True)[0])
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df4, concat=True)[0])
def testILocGetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
pd.testing.assert_series_equal(
expected, self.executor.execute_dataframe(df3, concat=True, check_series_name=False)[0])
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, self.executor.execute_dataframe(df4, concat=True)[0])
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df5, concat=True)[0])
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df6, concat=True)[0])
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df7, concat=True)[0])
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df8, concat=True)[0])
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df9, concat=True)[0])
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df10, concat=True)[0])
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
self.assertEqual(
expected, self.executor.execute_dataframe(df11, concat=True)[0])
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df12, concat=True)[0])
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df14, concat=True)[0])
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df13, concat=True)[0])
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
self.assertEqual(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[4])
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data.iloc[selection])
def testILocSetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, self.executor.execute_dataframe(df2, concat=True)[0])
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
self.executor.execute_dataframe(series, concat=True)[0], data)
def testLocGetItem(self):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df = df2.loc[3, 'b']
result = self.executor.execute_tensor(df, concat=True)[0]
expected = raw2.loc[3, 'b']
self.assertEqual(result, expected)
df = df1.loc['a3', 'b']
result = self.executor.execute_tensor(df, concat=True, check_shape=False)[0]
expected = raw1.loc['a3', 'b']
self.assertEqual(result, expected)
df = df2.loc[1:4, 'b':'d']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index in which non-unique exists
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = self.executor.execute_dataframe(df, concat=True)[0]
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to check that the file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combinations)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
    getattr(store.get_storer(key).table.description, name).itemsize
    == size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
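# Minimal, self-contained sketch (not one of the pandas test cases) of the
# data_columns pattern the test above exercises: columns listed in data_columns
# are stored as individually queryable columns, so `where` strings can filter on
# them. The file name "example_dc.h5" and the helper name are illustrative
# assumptions only.
import numpy as np
import pandas as pd

def _data_columns_sketch(path="example_dc.h5"):
    df = pd.DataFrame(
        {"A": np.random.randn(8), "B": np.arange(8)},
        index=pd.date_range("2000-01-01", periods=8),
    )
    with pd.HDFStore(path, mode="w") as store:
        store.append("df", df, data_columns=["B"])  # "B" becomes query-able on disk
        return store.select("df", "B > 3")          # filter evaluated by PyTables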
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
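# Minimal, self-contained sketch (not one of the pandas test cases) of the
# behaviour tested above: a frame with a MultiIndex written in table format can
# be read back with only a subset of columns while the index levels survive the
# round trip. "example_mi.h5" and the helper name are illustrative assumptions.
import numpy as np
import pandas as pd

def _multiindex_columns_sketch(path="example_mi.h5"):
    idx = pd.MultiIndex.from_product(
        [["foo", "bar"], ["one", "two"]], names=["first", "second"]
    )
    df = pd.DataFrame(np.random.randn(4, 3), index=idx, columns=["A", "B", "C"])
    df.to_hdf(path, "df", format="table")
    return pd.read_hdf(path, "df", columns=["A", "B"])  # index levels preserved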
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts= | bdate_range("2012-01-01", periods=300) | pandas.bdate_range |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""reana_bench script - benchmark script for REANA cluster"""
import concurrent.futures
import json
import logging
import os
import subprocess
import time
from datetime import datetime
from functools import lru_cache, partial
from pathlib import Path
from typing import Optional, List, NoReturn, Dict, Tuple
import click
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
import urllib3
from matplotlib.figure import Figure
from matplotlib.ticker import MaxNLocator
from reana_client.api.client import (
start_workflow,
create_workflow,
upload_to_server,
)
from reana_client.utils import load_reana_spec
urllib3.disable_warnings()
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(message)s",
level=logging.WARNING,
force=True,
)
logger = logging.getLogger("reana-bench")
logger.setLevel(logging.INFO)
REANA_ACCESS_TOKEN = os.getenv("REANA_ACCESS_TOKEN")
# two or more workers could sometimes hit the reana-server API rate limit
WORKERS_DEFAULT_COUNT = 1
# common datetime format
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
CURRENT_WORKING_DIRECTORY = os.getcwd()
@click.group()
def cli():
"""reana_bench script - runs single workflow multiple times, collects results, analyzes them.
Prerequisites:
- install reana-client 0.8.x, pandas and matplotlib Python packages
- set REANA_ACCESS_TOKEN and REANA_SERVER_URL
How to launch 50 concurrent workflows and collect results (option 1):
.. code-block:: console
\b
$ cd reana-demo-root6-roofit # find an example of REANA workflow
$ reana_bench.py launch -w roofit50yadage -n 1-50 -f reana-yadage.yaml # submit and start
$ reana_bench.py collect -w roofit50yadage # collect results and save them locally
$ reana_bench.py analyze -w roofit50yadage -n 1-50 # analyzes results that were saved locally
How to launch 50 concurrent workflows and collect results (option 2):
.. code-block:: console
\b
$ cd reana-demo-root6-roofit # find an example of REANA workflow
$ reana_bench.py submit -w roofit50yadage -n 1-50 -f reana-yadage.yaml # submit, do not start
$ reana_bench.py start -w roofit50yadage -n 1-50 # start workflows
$ reana_bench.py collect -w roofit50yadage # collect results and save them locally
$ reana_bench.py analyze -w roofit50yadage -n 1-50 # analyzes results that were saved locally
"""
pass
def _create_workflow(workflow: str, file: str) -> None:
reana_specification = load_reana_specification(file)
create_workflow(reana_specification, workflow, REANA_ACCESS_TOKEN)
@lru_cache(maxsize=None)
def load_reana_specification(reana_file_path: str) -> Dict:
return load_reana_spec(
click.format_filename(reana_file_path),
access_token=REANA_ACCESS_TOKEN,
skip_validation=True,
)
def _upload_workflow(workflow: str, file: str) -> None:
reana_specification = load_reana_specification(file)
filenames = []
if "inputs" in reana_specification:
filenames += [
os.path.join(CURRENT_WORKING_DIRECTORY, f)
for f in reana_specification["inputs"].get("files") or []
]
filenames += [
os.path.join(CURRENT_WORKING_DIRECTORY, d)
for d in reana_specification["inputs"].get("directories") or []
]
for filename in filenames:
upload_to_server(workflow, filename, REANA_ACCESS_TOKEN)
def _create_and_upload_single_workflow(workflow_name: str, reana_file: str) -> None:
absolute_file_path = f"{CURRENT_WORKING_DIRECTORY}/{reana_file}"
_create_workflow(workflow_name, absolute_file_path)
_upload_workflow(workflow_name, absolute_file_path)
def _build_extended_workflow_name(workflow: str, run_number: int) -> str:
return f"{workflow}-{run_number}"
def _create_and_upload_workflows(
workflow: str,
workflow_range: Tuple[int, int],
file: Optional[str] = None,
workers: int = WORKERS_DEFAULT_COUNT,
) -> None:
logger.info(f"Creating and uploading {workflow_range} workflows...")
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(
_create_and_upload_single_workflow,
_build_extended_workflow_name(workflow, i),
file,
)
for i in range(workflow_range[0], workflow_range[1] + 1)
]
for future in concurrent.futures.as_completed(futures):
# collect results; any exception raised in a worker is re-raised here
future.result()
def _get_utc_now_timestamp() -> str:
return datetime.utcnow().strftime(DATETIME_FORMAT)
def _start_single_workflow(workflow_name: str) -> Tuple[str, str]:
try:
start_workflow(workflow_name, REANA_ACCESS_TOKEN, {})
except Exception as e:
raise Exception(
f"Workflow {workflow_name} failed during the start. Details: {e}"
)
asked_to_start_datetime = _get_utc_now_timestamp()
return workflow_name, asked_to_start_datetime
def _create_empty_dataframe_for_started_results() -> pd.DataFrame:
return pd.DataFrame(columns=["name", "asked_to_start_date"])
def _start_workflows_and_record_start_time(
workflow_name: str, workflow_range: Tuple[int, int], workers: int = WORKERS_DEFAULT_COUNT
) -> pd.DataFrame:
logger.info(f"Starting {workflow_range} workflows...")
df = _create_empty_dataframe_for_started_results()
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(
_start_single_workflow, _build_extended_workflow_name(workflow_name, i)
)
for i in range(workflow_range[0], workflow_range[1] + 1)
]
for future in concurrent.futures.as_completed(futures):
try:
workflow_name, asked_to_start_datetime = future.result()
df = df.append(
{
"name": workflow_name,
"asked_to_start_date": asked_to_start_datetime,
},
ignore_index=True,
)
except Exception as e:
logger.error(e)
return df
def _get_workflows(workflow_prefix: str) -> pd.DataFrame:
# TODO: in case of a big number of workflows, this function can take a long time;
# maybe consider pagination and a page size
cmd = _build_reana_client_list_command(workflow_prefix)
return pd.DataFrame(json.loads(subprocess.check_output(cmd).decode("ascii")))
def _build_reana_command(command_type: str, workflow_name: str) -> List[str]:
return ["reana-client", command_type, "-w", workflow_name]
def _build_reana_client_list_command(
workflow: str, page: Optional[int] = None, size: Optional[int] = None
) -> List[str]:
base_cmd = ["reana-client", "list", "--json", "--filter", f"name={workflow}"]
if page:
base_cmd.append("--page")
base_cmd.append(str(page))
if size:
base_cmd.append("--size")
base_cmd.append(str(size))
return base_cmd
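# Illustration only: the helper above just assembles the reana-client CLI call.
# For example, _build_reana_client_list_command("roofit50yadage", page=1, size=50)
# returns:
#   ["reana-client", "list", "--json", "--filter", "name=roofit50yadage",
#    "--page", "1", "--size", "50"]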
def _workflows_finished(df: pd.DataFrame) -> bool:
return df["status"].isin(["failed", "finished"]).all()
def _convert_str_date_to_epoch(s: str) -> int:
return int(time.mktime(datetime.strptime(s, DATETIME_FORMAT).timetuple()))
def _clean_results(df: pd.DataFrame) -> pd.DataFrame:
logger.info("Cleaning results...")
# fix "-" values for created status
df.loc[df["status"] == "created", "started"] = None
df.loc[df["status"] == "created", "ended"] = None
df["asked_to_start_date"] = df.apply(
lambda row: None
if | pd.isna(row["asked_to_start_date"]) | pandas.isna |
"""
Name: diffusion_functions
Purpose: Contains functions to calculate diffusion of distributed wind model
(1) Determine maximum market size as a function of payback time;
(2) Parameterize Bass diffusion curve with diffusion rates (p, q) set by
payback time;
(3) Determine current stage (equivalent time) of diffusion based on
existing market and current economics; and
(4) Calculate new market share by stepping forward on the diffusion curve.
"""
import numpy as np
import pandas as pd
import config
import utility_functions as utilfunc
import decorators
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
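# Background sketch (illustrative only): the bass_param_p / bass_param_q values
# merged in below parameterize a Bass diffusion curve. Assuming the standard Bass
# formulation, cumulative adoption at equivalent time t is
#     F(t) = (1 - exp(-(p + q) * t)) / (1 + (q / p) * exp(-(p + q) * t))
# The model's actual market-share calculation lives in calc_diffusion_market_share
# (not shown here); this helper only illustrates the shape of the curve and relies
# on the module-level numpy import above.
def _bass_cumulative_adoption_sketch(p, q, t):
    """Standard Bass cumulative adoption fraction; for illustration only."""
    e = np.exp(-(p + q) * t)
    return (1.0 - e) / (1.0 + (q / p) * e)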
#=============================================================================
# ^^^^ Diffusion Calculator ^^^^
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_diffusion_solar(df, is_first_year, bass_params, year,
override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
"""
Calculates the market share (ms) added in the solve year. Market share must be less
than the max market share (mms), except when the initial ms is greater than the
calculated mms. In that circumstance, no diffusion is allowed until mms > ms. Also,
do not allow ms to decrease if economics deteriorate. Using the calculated
market share, relevant quantities are updated.
IN: df - pd dataframe - Main dataframe
OUT: df - pd dataframe - Main dataframe
market_last_year - pd dataframe - market to inform diffusion in next year
"""
df = df.reset_index()
bass_params = bass_params[bass_params['tech']=='solar']
# set p/q/teq_yr1 params
df = pd.merge(df, bass_params[['state_abbr', 'bass_param_p', 'bass_param_q', 'teq_yr1', 'sector_abbr']], how = 'left', on = ['state_abbr','sector_abbr'])
# calc diffusion market share
df = calc_diffusion_market_share(df, is_first_year)
# market share floor is based on last year's market share
df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])
# calculate the "new" market share (old - current)
df['new_market_share'] = df['market_share'] - df['market_share_last_year']
# cap the new_market_share where the market share exceeds the max market share
df['new_market_share'] = np.where(df['market_share'] > df['max_market_share'], 0, df['new_market_share'])
# calculate new adopters, capacity and market value
df['new_adopters'] = df['new_market_share'] * df['developable_agent_weight']
df['new_market_value'] = df['new_adopters'] * df['system_kw'] * df['system_capex_per_kw']
df['new_system_kw'] = df['new_adopters'] * df['system_kw']
df['new_batt_kw'] = df['new_adopters'] * df['batt_kw']
df['new_batt_kwh'] = df['new_adopters'] * df['batt_kwh']
# then add these values to values from last year to get cumulative values:
df['number_of_adopters'] = df['adopters_cum_last_year'] + df['new_adopters']
df['market_value'] = df['market_value_last_year'] + df['new_market_value']
df['system_kw_cum'] = df['system_kw_cum_last_year'] + df['new_system_kw']
df['batt_kw_cum'] = df['batt_kw_cum_last_year'] + df['new_batt_kw']
df['batt_kwh_cum'] = df['batt_kwh_cum_last_year'] + df['new_batt_kwh']
# constrain state-level capacity totals to known historical values
if year in (2014, 2016, 2018):
group_cols = ['state_abbr', 'sector_abbr', 'year']
state_capacity_total = (df[group_cols+['system_kw_cum', 'batt_kw_cum', 'batt_kwh_cum', 'agent_id']].groupby(group_cols)
.agg({'system_kw_cum':'sum', 'batt_kw_cum':'sum', 'batt_kwh_cum':'sum', 'agent_id':'count'})
.rename(columns={'system_kw_cum':'state_solar_kw_cum', 'batt_kw_cum':'state_batt_kw_cum', 'batt_kwh_cum':'state_batt_kwh_cum', 'agent_id':'agent_count'})
.reset_index())
# coerce dtypes
state_capacity_total.state_solar_kw_cum = state_capacity_total.state_solar_kw_cum.astype(np.float64)
state_capacity_total.state_batt_kw_cum = state_capacity_total.state_batt_kw_cum.astype(np.float64)
state_capacity_total.state_batt_kwh_cum = state_capacity_total.state_batt_kwh_cum.astype(np.float64)
df.system_kw_cum = df.system_kw_cum.astype(np.float64)
df.batt_kw_cum = df.batt_kw_cum.astype(np.float64)
df.batt_kwh_cum = df.batt_kwh_cum.astype(np.float64)
# merge state totals back to agent df
df = pd.merge(df, state_capacity_total, how = 'left', on = ['state_abbr', 'sector_abbr', 'year'])
# read csv of historical capacity values by state and sector
historical_state_df = | pd.read_csv(config.OBSERVED_DEPLOYMENT_BY_STATE) | pandas.read_csv |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
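# Minimal, self-contained sketch (not one of the pandas test cases) of the
# iterator/chunksize behaviour tested above; the file name "example_iter.h5" and
# the helper name are illustrative assumptions only.
import pandas as pd

def _chunked_read_sketch(path="example_iter.h5"):
    df = pd.DataFrame({"A": range(1000)})
    df.to_hdf(path, "df", format="table")            # chunked reads need table format
    chunks = pd.read_hdf(path, "df", chunksize=250)  # returns an iterator of DataFrames
    return pd.concat(list(chunks))                   # 4 chunks of 250 rows each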
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom this should return []
# e.g. `for e in []: print(True)` never prints True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
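# Informal summary of the where-clause forms exercised above (derived from the
# assertions, not an exhaustive grammar): comparisons on the index or on data
# columns ('index>df.index[3]', 'string="bar"'), conjunction/disjunction with
# & and |, inversion with ~ only for column filters such as
# "~(columns=['A','B'])" (numexpr conditions raise NotImplementedError), and
# membership tests such as "columns in ['A','B']".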
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
from datetime import datetime
import numpy as np
import pandas as pd
import pygsheets
import json
with open('./config.json') as config:
creds = json.load(config)['google']
def convert_int(value):
value = str(value).lower()
value = value.replace('fewer than five', '0')
value = value.replace('fewer than 5', '0')
value = value.replace('<5', '0')
value = value.replace('approximately', '')
value = value.replace('approx.', '').strip()
value = value.replace(',', '').replace('.0', '').replace('.', '')
return int(value)
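# Illustrative behaviour of convert_int, derived from the replacements above:
#   convert_int('fewer than five')     -> 0
#   convert_int('approximately 1,234') -> 1234
#   convert_int('12.0')                -> 12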
def split_high_low(df, col):
df = df.copy()
data = df[col].str.split(" to ", n=1, expand=True).fillna(0)
df[f'{col}_low'] = data[0].apply(convert_int)
df[f'{col}_high'] = data[1].apply(convert_int)
df[f'{col}_avg'] = (df[f'{col}_low'] + df[f'{col}_high'])/2
return df
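# Worked example (hypothetical column name): for a column 'cases' holding range
# strings such as '10 to 14', split_high_low(df, 'cases') adds cases_low=10,
# cases_high=14 and cases_avg=12.0; values without ' to ' get a high of 0, so
# their average is half of the low value.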
def clean_facility_city(string):
if string is None:
return
string = string.replace(')','')
string = string.split('-')[0]
return string.strip()
def sync_sheets(df, sheet_name):
print(f'[status] syncing google sheet {sheet_name}')
# google sheets authentication
api = pygsheets.authorize(service_file=creds, retries=5)
wb = api.open('ri-covid-19')
# open the google spreadsheet
sheet = wb.worksheet_by_title(f'{sheet_name}')
sheet.set_dataframe(df, (1,1))
def clean_general(fname):
print('[status] cleaning statewide general info')
df = pd.read_csv(f'./data/raw/{fname}.csv', parse_dates=['date'])
# remove total causing errors
df = df[df['metric'] != 'Cumulative people who tested positive (counts first positive lab per person) plus cumulative negative tests (may count people more than once)']
# re name metrics to shorten them
df.loc[(df['metric'].str.contains('positive')) & (df['date'] < '2020-07-13'), 'metric'] = 'RI positive cases'
df.loc[(df['metric'].str.contains('negative')) & (df['date'] < '2020-07-13'), 'metric'] = 'RI negative results'
df.loc[(df['metric'].str.contains('self-quarantine')) & (df['date'] < '2020-07-13'), 'metric'] = 'instructed to self-quarantine'
df.loc[(df['metric'].str.contains('hospitalized')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently hospitalized'
df.loc[(df['metric'].str.contains('die')) & (df['date'] < '2020-07-13'), 'metric'] = 'total deaths'
df.loc[(df['metric'].str.contains('fatalities')) & (df['date'] < '2020-07-13'), 'metric'] = 'total deaths'
df.loc[(df['metric'].str.contains('ventilators')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently on ventilator'
df.loc[(df['metric'].str.contains('on a vent')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently on ventilator'
df.loc[(df['metric'].str.contains('intensive care')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently in icu'
df.loc[(df['metric'].str.contains('discharged')) & (df['date'] < '2020-07-13'), 'metric'] = 'total discharged'
df.loc[df['metric'].str.contains('Cumulative people who tested positive '), 'metric'] = 'people positive'
df.loc[df['metric'].str.contains('Cumulative people tested '), 'metric'] = 'people tested'
df.loc[df['metric'].str.contains('New people who tested positive'), 'metric'] = 'new positive'
df.loc[df['metric'].str.contains('Cumlative people who tested positive'), 'metric'] = 'RI positive cases'
df.loc[df['metric'].str.contains('Cumlative people who have only tested negative'), 'metric'] = 'RI negative results'
df.loc[df['metric'].str.contains('Currently hospitalized'), 'metric'] = 'currently hospitalized'
df.loc[df['metric'].str.contains('Currently in ICU'), 'metric'] = 'currently in icu'
df.loc[df['metric'].str.contains('Currently vented'), 'metric'] = 'currently on ventilator'
df.loc[df['metric'].str.contains('Total deaths'), 'metric'] = 'total deaths'
# convert types count -> int, date -> datetime str
df['count'] = df['count'].apply(convert_int)
# pivot to get total tests given out then un-pivot
df = df.pivot_table(index='date', columns='metric', values='count').reset_index()
df['RI total tests'] = df['RI positive cases'] + df['RI negative results']
df = df.melt(col_level=0, id_vars=['date'], value_name='count').sort_values(by=['date', 'metric'])
# get daily changes
df['count'] = df['count'].fillna(0)
df['new_cases'] = df.groupby('metric')['count'].diff().fillna(0).astype(int)
df['change_%'] = df.groupby('metric')['count'].pct_change().replace(np.inf, 0).fillna(0)
# add date format
df['date'] = pd.to_datetime(df['date']).dt.strftime('%m/%d/%Y')
# save & sync to google sheets
df.to_csv('./data/clean/ri-covid-19-clean.csv', index=False)
sync_sheets(df, 'statewide')
def clean_geographic(fname):
print('[status] cleaning city/town info')
df = pd.read_csv(f'./data/raw/{fname}.csv')
pop = pd.read_csv('./data/external/population_est_2017.csv')
# remove under-5 suppressed values
df['count'] = df['count'].apply(convert_int)
df['count'] = df['count'].fillna(0)
# sort by date
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:46:57 2020
@author: reideej1
:DESCRIPTION: Evaluate coaching data for the last 50 years of college football
- the goal is to determine how coaches who struggle in their first 3 years
fare over time at the same program
:REQUIRES: scrape_sports_reference.py located in: cfbAnalysis\src\data
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import datetime
import glob
import os
import numpy as np
import pandas as pd
import pathlib
import time
import tqdm
from src.data.scrape_sports_reference import *
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def renameSchool(df, name_var):
'''
Purpose: Rename a school/university to a standard name as specified in
the file `names_pictures_ncaa.csv`
Inputs
------
df : Pandas Dataframe
DataFrame containing a school-name variable for which the names
need to be standardized
name_var : string
Name of the variable which is to be renamed/standardized
Outputs
-------
list(row)[0] : string
Standardized version of the school's name based on the first value
in the row in the file `school_abbreviations.csv`
'''
# read in school name information
df_school_names = pd.read_csv(r'references\names_pictures_ncaa.csv')
# convert the dataframe to a dictionary such that the keys are the
# optional spelling of each school and the value is the standardized
# name of the school
dict_school_names = {}
for index, row in df_school_names.iterrows():
# isolate the alternative name columns
names = row[[x for x in row.index if 'Name' in x]]
# convert the row to a list that doesn't include NaN values
list_names = [x for x in names.values.tolist() if str(x) != 'nan']
# add the nickname to the team names as an alternative name
nickname = row['Nickname']
list_names_nicknames = list_names.copy()
for name in list_names:
list_names_nicknames.append(name + ' ' + nickname)
# extract the standardized team name
name_standardized = row['Team']
# add the standardized name
list_names_nicknames.append(name_standardized)
# add the nickname to the standardized name
list_names_nicknames.append(name_standardized + ' ' + nickname)
# for every alternative spelling of the team, set the value to be
# the standardized name
for name_alternate in list_names_nicknames:
dict_school_names[name_alternate] = name_standardized
# df[name_var] = df[name_var].apply(
# lambda x: dict_school_names[x] if str(x) != 'nan' else '')
df[name_var] = df[name_var].apply(
lambda x: rename_school_helper(x, dict_school_names))
return df
def rename_school_helper(name_school, dict_school_names):
try:
if str(name_school) != 'nan':
return dict_school_names[name_school]
else:
return ''
except:
print(f'School not found in school abbreviations .csv file: {name_school} ')
return name_school
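# Illustrative usage (school names hypothetical): given a DataFrame whose
# 'School' column mixes alternate spellings and "name + nickname" strings,
# renameSchool(df, 'School') maps each entry to the standardized 'Team' value
# from names_pictures_ncaa.csv; names that cannot be matched are printed and
# left unchanged.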
def create_coach_dataframe(df_schools):
'''
Purpose: Given historic school data, create a dataframe of coaches and
their performance data on a year-by-year basis
Inputs
------
df_schools : Pandas DataFrame
Contains year-by-year results for each school (with coaches' names)
Outputs
-------
df_coaches : Pandas DataFrame
A dataframe containing all historic season data from a coaching perspective
'''
# Create a dictionary that assigns each school to its current conference
df_conf = df_schools.groupby(['School', 'Conf']).head(1).groupby('School').head(1).reset_index(drop = True)
df_conf = df_conf[['School', 'Conf']]
df_conf['Power5'] = df_conf.apply(lambda row: True if row['Conf'] in [
'SEC', 'Pac-12', 'Big 12', 'ACC', 'Big Ten'] else False, axis = 1)
df_conf = df_conf.set_index('School')
dict_conf = df_conf.to_dict(orient = 'index')
# Create a coaching dataframe by iterating over every year for every school
list_coaches = []
for index, row in df_schools.iterrows():
# handle every coach that coached that season
for coach in row['Coach(es)'].split(', '):
dict_coach_year = {}
dict_coach_year['coach'] = coach.split(' (')[0].strip()
dict_coach_year['year'] = row['Year']
dict_coach_year['school'] = row['School']
dict_coach_year['ranking_pre'] = row['AP_Pre']
dict_coach_year['ranking_high'] = row['AP_High']
dict_coach_year['ranking_post'] = row['AP_Post']
dict_coach_year['ranked_pre'] = not pd.isna(row['AP_Pre'])
dict_coach_year['ranked_post'] = not pd.isna(row['AP_Post'])
try:
dict_coach_year['ranked_top_10'] = row['AP_Post'] <= 10
except:
print(row['AP_Post'])
dict_coach_year['ranked_top_5'] = row['AP_Post'] <= 5
# handle bowl games
if pd.isna(row['Bowl']):
dict_coach_year['bowl'] = False
dict_coach_year['bowl_name'] = ''
dict_coach_year['bowl_win'] = False
else:
dict_coach_year['bowl'] = True
dict_coach_year['bowl_name'] = row['Bowl'].split('-')[0]
dict_coach_year['bowl_win'] = False
if '-' in str(row['Bowl']):
try:
if row['Bowl'].split('-')[1] == 'W':
dict_coach_year['bowl_win'] = True
except:
print(row['Bowl'])
# handle wins and losses
if len(coach.split('(')[1].split('-')) > 2:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
dict_coach_year['T'] = coach.split('(')[1].split('-')[2].strip(')')
else:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
# assign conference information
dict_coach_year['conf'] = dict_conf[row['School']]['Conf']
dict_coach_year['power5'] = dict_conf[row['School']]['Power5']
list_coaches.append(dict_coach_year)
# Convert list to DataFrame
df_coaches = | pd.DataFrame(list_coaches) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import flask
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from plotly import graph_objs as go
from datetime import datetime, date
from dash.exceptions import PreventUpdate
from ..app import app
import pandas_datareader.data as web
import random
import requests
# import plotly.plotly as py
# import json
# with open("./data/ticker.json", "r") as read_file:
# labelFull = json.load(read_file)
dfStock = pd.read_csv('./data/stock.csv')
dfETF = | pd.read_csv("./data/ETF.csv") | pandas.read_csv |
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
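# Note on the Annotator contract exercised by the tests below (derived from the
# assertions, not from external docs): a concrete Annotator needs a `columns`
# list and a `calc(df)` returning a DataFrame/Series matching those columns;
# optionally a `cache_name`, `dep_annos()` for annotator dependencies, and
# `deps(ddf)` for pipegraph job dependencies.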
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_mangle(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert (a.non_annotator_columns == ["A", "B"]).all()
def mangle(df):
df = df.drop("A", axis=1)
df = df[df.B == "c"]
return df
fn = a.write("test.csv", mangle)[1]
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), mangle(test_df))
def test_magic(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
a = DelayedDataFrame("shu", lambda: test_df)
assert hash(a)
assert a.name in str(a)
assert a.name in repr(a)
def test_annotator(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("column", "value")
a.annotate()
assert "column" in a.df.columns
assert (a.df["column"] == "value").all()
def test_add_non_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(TypeError):
a += 5
def test_annotator_wrong_columns(self):
class WrongConstant(Annotator):
def __init__(self, column_name, value):
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({"shu": self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(ValueError):
a += WrongConstant("column", "value")
def test_annotator_minimum_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
assert "Direct" in str(a.load_strategy)
class MissingCalc(Annotator):
column_names = ["shu"]
with pytest.raises(AttributeError):
a += MissingCalc()
class EmptyColumnNames(Annotator):
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNames()
class EmptyColumnNamesButCacheName(Annotator):
cache_name = "shu"
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNamesButCacheName()
class MissingColumnNames(Annotator):
def calc(self, df):
pass
with pytest.raises(AttributeError):
a += MissingColumnNames()
class NonListColumns(Annotator):
columns = "shu"
def calc(self, df):
pass
with pytest.raises(ValueError):
a += NonListColumns()
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.annotate()
assert_frame_equal(
a.df, pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]})
)
def test_annos_added_only_once(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
a += c # this get's ignored
def test_annos_same_column_different_anno(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
c = CountingConstant("hello2", "c")
a += c
a.annotate()
assert "hello2" in a.df.columns
assert count[0] == 2
d = CountingConstant("hello2", "d")
assert c is not d
with pytest.raises(ValueError):
a += d
def test_annos_same_column_different_anno2(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += A()
with pytest.raises(ValueError):
a += B()
def test_annos_dependening(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_annos_dependening_none(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [None, A(), None]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_filtering(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "B"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("C", "c")
assert "C" in a.df.columns
b = a.filter("sha", lambda df: df["A"] == 1)
assert "C" in b.df.columns
a += A()
assert "aa" in a.df.columns
assert "aa" in b.df.columns
b += B()
assert "ab" in b.df.columns
assert not "ab" in a.df.columns
def test_filtering2(self):
counts = collections.Counter()
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
counts["A"] += 1
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "B"
columns = ["ab"]
def calc(self, df):
counts["B"] += 1
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1)
b += B()
assert "aa" in b.df.columns
assert "ab" in b.df.columns
assert not "aa" in a.df.columns
assert not "ab" in a.df.columns
assert counts["A"] == 1
a += A()
assert "aa" in a.df.columns
assert counts["A"] == 2 # no two recalcs
assert not "ab" in a.df.columns
a += B()
assert "ab" in a.df.columns
assert counts["A"] == 2 # no two recalcs
assert counts["B"] == 2 # no two recalcs
def test_filtering_result_dir(self):
counts = collections.Counter()
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
counts["A"] += 1
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1, result_dir="shu2")
assert b.result_dir.absolute() == Path("shu2").absolute()
def test_filtering_on_annotator(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: (["a", "b"] * int(len(df) / 2 + 1))[: len(df)]},
index=df.index,
)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(KeyError):
b = a.filter("sha", lambda df: df["aa"] == "a")
b = a.filter("sha", lambda df: df["aa"] == "a", [A()])
canno = Constant("C", "c")
a += canno
b += canno
assert (b.df["A"] == [1]).all()
def test_multi_level(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
b = a.filter("sha", lambda df: df["C"] == 4, Constant("C", 4))
a1 = LenAnno("count")
b += a1
c = b.filter("shc", lambda df: df["A"] >= 2)
a2 = LenAnno("count2")
c += a2
c.annotate()
print(c.df)
assert len(c.df) == 2
assert (c.df["A"] == [2, 3]).all()
assert (c.df["count"] == "count3").all()
assert (c.df["count2"] == "count22").all()
def test_anno_not_returning_enough_rows_and_no_index_range_index_on_df(self):
class BrokenAnno(Annotator):
columns = ["X"]
def calc(self, df):
return pd.DataFrame({"X": [1]})
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})
)
with pytest.raises(ValueError) as excinfo:
a += BrokenAnno()
print(str(excinfo))
assert "Length and index mismatch " in str(excinfo.value)
def test_anno_returning_series(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C"]
def calc(self, df):
return pd.Series(list(range(len(df))))
a += SeriesAnno()
assert (a.df["C"] == [0, 1, 2]).all()
def test_anno_returning_series_but_defined_two_columns(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C", "D"]
def calc(self, df):
return pd.Series(list(range(len(df))))
with pytest.raises(ValueError) as excinfo:
a += SeriesAnno()
assert "result was no dataframe" in str(excinfo)
def test_anno_returning_string(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C", "D"]
def calc(self, df):
return "abc"
with pytest.raises(ValueError) as excinfo:
a += SeriesAnno()
assert "return non DataFrame" in str(excinfo)
def test_anno_returing_right_length_but_wrong_start_range_index(self):
a = DelayedDataFrame("shu", lambda: pd.DataFrame({"A": [1, 2, 3]}))
class BadAnno(Annotator):
columns = ["X"]
def calc(self, df):
return pd.Series(["a", "b", "c"], index=pd.RangeIndex(5, 5 + 3))
with pytest.raises(ValueError) as excinfo:
a += BadAnno()
assert "Index mismatch" in str(excinfo)
def test_lying_about_columns(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C"]
def calc(self, df):
return pd.DataFrame({"D": [0, 1, 2]})
with pytest.raises(ValueError) as excinfo:
a += SeriesAnno()
assert "declared different" in str(excinfo)
def test_filtering_by_definition_operators(self):
a = DelayedDataFrame("shu", pd.DataFrame({"A": [-1, 0, 1, 2, 3, 4]}))
assert (a.filter("a1", [("A", "==", 0)]).df["A"] == [0]).all()
assert (a.filter("a2", [("A", ">=", 3)]).df["A"] == [3, 4]).all()
assert (a.filter("a3", [("A", "<=", 0)]).df["A"] == [-1, 0]).all()
assert (a.filter("a4", [("A", ">", 3)]).df["A"] == [4]).all()
assert (a.filter("a5", [("A", "<", 0)]).df["A"] == [-1]).all()
assert (a.filter("a6", [("A", "|>", 0)]).df["A"] == [-1, 1, 2, 3, 4]).all()
assert (a.filter("a7", [("A", "|>=", 1)]).df["A"] == [-1, 1, 2, 3, 4]).all()
assert (a.filter("a8", [("A", "|<", 2)]).df["A"] == [-1, 0, 1]).all()
assert (a.filter("a9", [("A", "|<=", 2)]).df["A"] == [-1, 0, 1, 2]).all()
with pytest.raises(ValueError):
a.filter("a10", [("A", "xx", 2)])
class XAnno(Annotator):
def __init__(self, column_name, values):
self.columns = [column_name]
self.values = values
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.values}, index=df.index)
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameBoth:
def test_filtering_by_definition(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = XAnno("C", [1, 2])
a += c
d = XAnno("D", [4, 5])
# native column
a1 = a.filter("a1", ("A", "==", 1))
# search for the anno
a2 = a.filter("a2", ("C", "==", 2))
# extract the column name from the anno - anno already added
a4 = a.filter("a4", (d, "==", 5))
# extract the column name from the anno - anno not already added
a3 = a.filter("a3", (c, "==", 1))
# lookup column to name
a6 = a.filter("a6", ("X", "==", 2), column_lookup={"X": "C"})
# lookup column to anno
a7 = a.filter("a7", ("X", "==", 2), column_lookup={"X": c})
if not ppg.inside_ppg():
e1 = XAnno("E", [6, 7])
e2 = XAnno("E", [6, 8])
assert find_annos_from_column("E") == [e1, e2]
# column name to longer unique
with pytest.raises(KeyError):
a.filter("a5", ("E", "==", 5))
with pytest.raises(KeyError):
a.filter("a5", ((c, "D"), "==", 5))
force_load(a1.annotate())
force_load(a2.annotate())
force_load(a3.annotate())
force_load(a4.annotate())
force_load(a6.annotate())
force_load(a7.annotate())
run_pipegraph()
assert (a1.df["A"] == [1]).all()
assert (a2.df["A"] == [2]).all()
assert (a3.df["A"] == [1]).all()
assert (a4.df["A"] == [2]).all()
assert (a6.df["A"] == [2]).all()
assert (a7.df["A"] == [2]).all()
@pytest.mark.usefixtures("new_pipegraph")
class Test_DelayedDataFramePPG:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert not hasattr(a, "df")
print("load is", a.load())
force_load(a.load(), False)
ppg.run_pipegraph()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
fn = a.write()[0]
ppg.run_pipegraph()
assert Path(fn.filenames[0]).exists()
assert_frame_equal(pd.read_csv(fn.filenames[0], sep="\t"), test_df)
def test_write_mixed_manglers(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
a.write(mangler_function=lambda df: df)
def b(df):
return df.head()
ok = False
try:
a.write(mangler_function=b)
except Exception as e:
se = str(type(e))
if "JobContractError" in se: # ppg
ok = True
elif "JobRedefinitionError" in se: # ppg2
ok = True
if not ok:
raise ValueError("Did not raise the expected exception")
def test_annotator_basic(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("aa", "aa")
force_load(a.annotate())
ppg.run_pipegraph()
assert (a.df["aa"] == "aa").all()
def test_annotator_raising(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class RaiseAnno(Annotator):
columns = ["aa"]
cache_name = "empty"
def calc(self, df):
raise ValueError("hello")
anno1 = RaiseAnno()
a += anno1
force_load(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
anno_job = a.anno_jobs[RaiseAnno().get_cache_name()]
assert "hello" in str(anno_job.lfg.exception)
def test_annotator_columns_not_list(self):
class BrokenAnno(Annotator):
def __init__(
self,
):
self.columns = "shu"
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += BrokenAnno()
lg = a.anno_jobs[BrokenAnno().get_cache_name()]
force_load(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "list" in str(lg().lfg.exception)
def test_annotator_empty_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class EmptyColumnNames(Annotator):
columns = []
cache_name = "empty"
def calc(self, df):
return pd.DataFrame({"shu": [1, 2]})
def __repr__(self):
return "EmptyColumNames()"
a += EmptyColumnNames()
force_load(a.annotate())
anno_job_cb = a.anno_jobs[EmptyColumnNames().get_cache_name()]
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert anno_job_cb() is anno_job_cb()
assert "anno.columns was empty" in repr(anno_job_cb().exception)
def test_annotator_missing_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class MissingColumnNames(Annotator):
cache_name = "MissingColumnNames"
def calc(self, df):
return pd.DataFrame({})
def __repr__(self):
return "MissingColumnNames()"
a += MissingColumnNames()
lg = a.anno_jobs["MissingColumnNames"]
force_load(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "AttributeError" in repr(lg().lfg.exception)
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.anno_jobs[Dynamic().get_cache_name()]
force_load(a.annotate())
ppg.run_pipegraph()
assert_frame_equal(
a.df, pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]})
)
def test_annos_same_column_different_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = Constant("hello", "c")
a += c
c = Constant("hello2", "c")
a += c
c = Constant("hello2", "d")
with pytest.raises(ValueError):
a += c
def test_annos_dependening(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
ppg.run_pipegraph()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_filteringA(self):
ppg.util.global_pipegraph.quiet = False
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1)
a += LenAnno("C")
b.write()
ppg.run_pipegraph()
assert "C" in b.df.columns
assert "C" in a.df.columns
assert (b.df["C"] == "C2").all()
assert (a.df["C"] == "C2").all()
def test_filteringB(self):
ppg.util.global_pipegraph.quiet = False
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1)
a += LenAnno("C")
b += LenAnno("D")
assert not LenAnno("D").get_cache_name() in a.anno_jobs
b.write()
ppg.run_pipegraph()
assert not LenAnno("D").get_cache_name() in a.anno_jobs
assert "C" in b.df.columns
assert "C" in a.df.columns
assert not "D" in a.df.columns
assert len(a.df) == 2
assert len(b.df) == 1
assert (b.df["C"] == "C2").all()
assert (b.df["D"] == "D1").all()
assert (a.df["C"] == "C2").all()
assert not "D" in a.df.columns
def test_filteringC(self):
ppg.util.global_pipegraph.quiet = False
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
# a += LenAnno("C")
b = a.filter("sha", lambda df: df["C"] == 2, LenAnno("C"), set())
b.write()
ppg.run_pipegraph()
assert "C" in a.df
assert "C" in b.df
def test_filter_and_clone_without_annos(self):
ppg.util.global_pipegraph.quiet = False
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
# a += LenAnno("C")
b = a.filter("sha", lambda df: df["C"] == 2, LenAnno("C"), set())
b.write()
with pytest.raises(ValueError):
b.clone_without_annotators("shc", "hello")
c = b.clone_without_annotators("shc", result_dir="dir_c")
fn = c.write()[1]
ppg.run_pipegraph()
assert "C" in a.df
assert "C" in b.df
assert "C" not in c.df
written = pd.read_csv(fn, sep="\t")
assert set(c.df.columns) == set(written.columns)
for col in c.df.columns:
assert (c.df[col] == written[col]).all()
def test_filtering_on_annotator_missing(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: (["a", "b"] * int(len(df) / 2 + 1))[: len(df)]},
index=df.index,
)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["aaA"] == "a")
load_job = b.load()
a.write()
print("run now")
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "KeyError" in repr(load_job.lfg.exception)
def test_forbidden_cache_names(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c1 = Constant("c1*", "*")
c2 = Constant("c2/", "*")
c3 = Constant("c3?", "*")
c4 = Constant("c4" * 100, "*")
with pytest.raises(ValueError):
a += c1
with pytest.raises(ValueError):
a += c2
with pytest.raises(ValueError):
a += c3
with pytest.raises(ValueError):
a += c4
def test_multi_level(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
b = a.filter("sha", lambda df: df["C"] == 4, Constant("C", 4))
a1 = LenAnno("count")
b += a1
c = b.filter("shc", lambda df: df["A"] >= 2)
a2 = LenAnno("count2")
c += a2
c.write()
ppg.run_pipegraph()
assert len(c.df) == 2
assert (c.df["A"] == [2, 3]).all()
assert (c.df["count"] == "count3").all()
assert (c.df["count2"] == "count22").all()
def test_anno_not_returning_enough_rows_and_no_index(self):
class BrokenAnno(Annotator):
columns = ["X"]
def calc(self, df):
return pd.DataFrame({"X": [1]})
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
a += BrokenAnno()
lj = a.anno_jobs["X"]
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "Length and index mismatch " in str(lj().exception)
def test_anno_not_returning_enough_rows_and_no_index_range_index_on_df(self):
class BrokenAnno(Annotator):
columns = ["X"]
def calc(self, df):
return pd.DataFrame({"X": [1]})
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})
)
a += BrokenAnno()
lj = a.anno_jobs["X"]
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "Length and index mismatch " in str(lj().exception)
def test_annotator_coliding_with_non_anno_column(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
a += Constant("A", "aa")
lj = a.anno_jobs["A"]
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "were already present" in str(lj().exception)
def test_anno_returning_series(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C"]
def calc(self, df):
return pd.Series(list(range(len(df))))
a += SeriesAnno()
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
ppg.run_pipegraph()
assert (a.df["C"] == [0, 1, 2]).all()
def test_anno_returning_series_but_defined_two_columns(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C", "D"]
def calc(self, df):
return pd.Series(list(range(len(df))))
a += SeriesAnno()
lj = a.anno_jobs["C"]
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "result was no dataframe" in str(lj().lfg.exception)
def test_anno_returning_string(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C", "D"]
def calc(self, df):
return "abc"
a += SeriesAnno()
lj = a.anno_jobs["C"]
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "result was no dataframe" in str(lj().lfg.exception)
def test_lying_about_columns(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C"]
def calc(self, df):
return pd.DataFrame({"D": [0, 1, 2]})
a += SeriesAnno()
lj = a.anno_jobs["C"]
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "declared different " in str(lj().exception)
def test_annotator_depending_on_actual_jobs(self):
def wf():
Path("fileA").write_text("hello")
class TestAnno(Annotator):
columns = ["C"]
def calc(self, df):
prefix = Path("fileA").read_text()
return pd.Series([prefix] * len(df))
def deps(self, ddf):
return [ppg.FileGeneratingJob("fileA", wf)]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})
)
a.add_annotator(TestAnno())
a.write()
ppg.run_pipegraph()
assert (a.df["C"] == "hello").all()
def test_nested_anno_dependencies(self):
class Nested(Annotator):
columns = ["b"]
def calc(self, df):
return pd.Series([10] * len(df))
def dep_annos(self):
return [Constant("Nestedconst", 5)]
class Nesting(Annotator):
columns = ["a"]
def calc(self, df):
return pd.Series([15] * len(df))
def dep_annos(self):
return [Constant("Nestingconst", 5), Nested()]
anno = Nesting()
a = DelayedDataFrame(
"shu", lambda: | pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]}) | pandas.DataFrame |
"""Mongo QCDB Database object and helpers
"""
import numpy as np
import itertools as it
import math
import json
import copy
import pandas as pd
from . import molecule
from . import statistics
# from . import visualization
# from . import mongo_helper
from . import constants
from . import fields
# from . import client
def _nCr(n, r):
"""
Compute the binomial coefficient n! / (r! * (n-r)!)
"""
return math.factorial(n) / math.factorial(r) / math.factorial(n - r)
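# Worked example: _nCr(5, 2) == 120 / 2 / 6 == 10.0 (a float result, since
# true division is used).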
class Database(object):
"""
This is a Mongo QCDB database class.
"""
def __init__(self, name, socket=None, db_type="rxn"):
# Client and mongod objects
self.client = None
self.mongod = None
# Blank data object
self.data = {}
self.data["reactions"] = []
self.data["name"] = name
self.data["provenence"] = {}
self.data["db_type"] = db_type.upper()
if self.data["db_type"] not in ["RXN", "IE"]:
raise TypeError("Database: db_type must either be RXN or IE.")
# Index and internal data
self.df = pd.DataFrame()
self.rxn_index = pd.DataFrame()
if socket is not None:
if isinstance(socket, client.Client):
self.client = socket
self.mongod = socket # This is overloaded
elif isinstance(socket, mongo_helper.MongoSocket):
self.mongod = socket
else:
raise TypeError("Database: client argument of unrecognized type '%s'" %
type(socket))
tmp_data = self.mongod.mongod_query("get_database", name)
if tmp_data is None:
print("Warning! Name '%s' not found, creating blank database." % name)
else:
self.data = tmp_data
self.df = pd.DataFrame(index=self.get_index())
# Unroll the index
tmp_index = []
for rxn in self.data["reactions"]:
name = rxn["name"]
for stoich_name in list(rxn["stoichiometry"]):
for mol_hash, coef in rxn["stoichiometry"][stoich_name].items():
tmp_index.append([name, stoich_name, mol_hash, coef])
self.rxn_index = pd.DataFrame(
tmp_index, columns=["name", "stoichiometry", "molecule_hash", "coefficient"])
# If we are making a new database we may need new hashes and json objects
self._new_molecule_jsons = {}
# What queried data do we have?
self._queries = {}
# Getters
def __getitem__(self, args):
return self.df[args]
def refresh(self):
"""
Reruns the entire query history to rebuild the current database from saved pages.
"""
for k, q in self._queries.items():
self.query(q[0], **q[1])
return True
def _unroll_query(self, keys, stoich, **kwargs):
tmp_idx = self.rxn_index[self.rxn_index["stoichiometry"] == stoich].copy()
tmp_idx = tmp_idx.reset_index(drop=True)
# There could be duplicates so take the unique and save the map
umols, uidx = np.unique(tmp_idx["molecule_hash"], return_index=True)
# Evaluate the overall dataframe
if "field" in kwargs and kwargs["field"] is None:
del kwargs["field"]
values = self.mongod.mongod_query("evaluate", list(umols), list(keys), **kwargs)
# Join on molecule hash
tmp_idx = tmp_idx.join(values, on="molecule_hash")
# Apply stoich values
for col in values.columns:
tmp_idx[col] *= tmp_idx["coefficient"]
tmp_idx = tmp_idx.drop(['stoichiometry', 'molecule_hash', 'coefficient'], axis=1)
# If *any* value is null in the stoich sum, the whole thing should be Null. Pandas is being too clever
null_mask = tmp_idx.copy()
null_mask[keys] = null_mask[keys].isnull()
null_mask = null_mask.groupby(["name"]).sum() != False
tmp_idx = tmp_idx.groupby(["name"]).sum()
tmp_idx[null_mask] = np.nan
return tmp_idx
def query(self,
keys,
stoich="default",
prefix="",
postfix="",
reaction_results=False,
scale="kcal",
field=None,
ignore_db_type=False):
"""
Queries the local MongoSocket data for the requested keys and stoichiometry.
Parameters
----------
keys : str, list
A list of model chemistries to query.
stoich : str
The given stoichiometry to compute.
prefix : str
A prefix given to the resulting column names.
postfix : str
A postfix given to the resulting column names.
reaction_results : bool
Toggles a search between the Mongo Pages and the Database's reaction_results field.
scale : str, double
All units are based in hartree, the default scaling is to kcal/mol.
ignore_db_type : bool
If True, skip the IE-specific monomer subtraction and treat the database as RXN.
Returns
-------
success : bool
Returns True if the requested query was successful or not.
Notes
-----
Examples
--------
db.query(["B3LYP/aug-cc-pVDZ", "B3LYP/def2-QZVP"], stoich="cp", prefix="cp-")
"""
if not reaction_results and (self.mongod is None):
raise AttributeError("DataBase: MongoSocket was not set.")
# Keys should be iterable
if isinstance(keys, str):
keys = [keys]
# Save query to be repeated by refresh
query_packet = [
keys, {
"stoich": stoich,
"prefix": prefix,
"postfix": postfix,
"reaction_results": reaction_results,
"scale": scale
}
]
query_packet_hash = fields.get_hash(query_packet, None)
if query_packet_hash not in self._queries:
self._queries[query_packet_hash] = query_packet
# If reaction results
if reaction_results:
tmp_idx = pd.DataFrame(index=self.df.index, columns=keys)
for rxn in self.data["reactions"]:
for col in keys:
try:
tmp_idx.ix[rxn["name"], col] = rxn["reaction_results"][stoich][col]
except:
pass
# Convert to numeric
tmp_idx = tmp_idx.apply(lambda x: pd.to_numeric(x, errors='ignore'))
tmp_idx[tmp_idx.select_dtypes(include=['number']).columns] *= constants.get_scale(scale)
tmp_idx.columns = [prefix + x + postfix for x in tmp_idx.columns]
self.df[tmp_idx.columns] = tmp_idx
return True
# if self.data["db_type"].lower() == "ie":
# _ie_helper(..)
if (not ignore_db_type) and (self.data["db_type"].lower() == "ie"):
monomer_stoich = ''.join([x for x in stoich if not x.isdigit()]) + '1'
tmp_idx_complex = self._unroll_query(keys, stoich, field=field)
tmp_idx_monomers = self._unroll_query(keys, monomer_stoich, field=field)
# Combine
tmp_idx = tmp_idx_complex - tmp_idx_monomers
else:
tmp_idx = self._unroll_query(keys, stoich, field=field)
tmp_idx.columns = [prefix + x + postfix for x in tmp_idx.columns]
# scale
tmp_idx = tmp_idx.apply(lambda x: pd.to_numeric(x, errors='ignore'))
tmp_idx[tmp_idx.select_dtypes(include=['number']).columns] *= constants.get_scale(scale)
# Apply to df
self.df[tmp_idx.columns] = tmp_idx
return True
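# Note on the IE branch above (descriptive comment): for an interaction-energy
# database the requested stoichiometry (e.g. "cp") is evaluated for the full
# complex, the digits are stripped and replaced with "1" to form the monomer
# stoichiometry ("cp1"), and the reported value is complex minus the sum of
# the monomers.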
def compute(self, keys, stoich="default", options=None, program="psi4", other_fields=None, ignore_db_type=False):
if options is None:
options = {}
if other_fields is None:
other_fields = {}
if self.client is None:
raise AttributeError("DataBase: Compute: Client was not set.")
# Keys should be iterable
if isinstance(keys, str):
keys = [keys]
if (not ignore_db_type) and (self.data["db_type"].lower() == "ie"):
monomer_stoich = ''.join([x for x in stoich if not x.isdigit()]) + '1'
tmp_monomer = self.rxn_index[self.rxn_index["stoichiometry"] == monomer_stoich].copy()
tmp_complex = self.rxn_index[self.rxn_index["stoichiometry"] == stoich].copy()
tmp_idx = pd.concat((tmp_monomer, tmp_complex), axis=0)
else:
tmp_idx = self.rxn_index[self.rxn_index["stoichiometry"] == stoich].copy()
tmp_idx = tmp_idx.reset_index(drop=True)
# There could be duplicates so take the unique and save the map
umols, uidx = np.unique(tmp_idx["molecule_hash"], return_index=True)
values = self.mongod.mongod_query("evaluate", list(umols), list(keys))
mask = | pd.isnull(values) | pandas.isnull |
from datetime import datetime, timedelta
# Airflow
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
# SQL
import sqlite3
# Data
from sklearn.datasets import load_iris
# Preprocessing and Metrics
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split, cross_val_score, KFold
import pandas as pd
# Models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
# CREATE DB
def create_db():
'''
Create a SQLite database for the Iris dataset
'''
conn = sqlite3.connect('airflow.db')
cursor = conn.cursor()
try:
cursor.execute("""
CREATE TABLE iris (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
SepalLengthCm FLOAT NOT NULL,
SepalWidthCm FLOAT NOT NULL,
PetalLengthCm FLOAT NOT NULL,
PetalWidthCm FLOAT NOT NULL,
Species TEXT NOT NULL
);
""")
conn.commit()
print('Table created successfully.')
except:
print("Table already exists")
pass
conn.close()
# DB CONNECT
def db_connect():
'''
Connect to DB
'''
conn = sqlite3.connect('airflow.db')
cursor = conn.cursor()
return conn, cursor
# READ DB
def db_2_df(limit):
'''
Read the data from DB
'''
conn, cursor = db_connect()
data = pd.read_sql_query(f"SELECT * FROM iris LIMIT {limit}", conn)
conn.close()
return data
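# Example (illustrative): db_2_df(5) returns the first 5 rows of the iris
# table as a pandas DataFrame, including the autoincrement id column.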
# GET DATA FROM SCIKIT-LEARN
def get_data():
'''
Get iris dataset from scikit-learn datasets and save to SQLite
'''
iris = load_iris()
conn, cursor = db_connect()
df = | pd.DataFrame(iris.data, columns = ["sepal_lenght","sepal_width","petal_lenght","petal_width"]) | pandas.DataFrame |
'''
Manual clustering script that runs once the user has provided k. It then performs
k-means clustering and adds cluster nodes numbered 1 -> k to the Neo4j DB, with the relationships:
(Sample)-[IN_CLUSTER]->(Cluster)
(Pair)-[HAS_CLUSTER]->(Cluster)
'''
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from py2neo import Node, Relationship, Graph, Path, authenticate, remote
from tqdm import tqdm
import datetime
import argparse
import sys, csv
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
def get_timestamp():
"""
Get timestamp of current date and time.
"""
timestamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
return timestamp
def run_KMeans(pairID, k):
# KMeans
MCAdata = ('data/plots/mca_' + str(pairID) + '.dat')
# for 3d
# df = pd.read_csv('data/plots/mca3d_22.dat', header=0, index_col=0)
# df2 = df[['x', 'y', 'z']]
# for 2d
df = pd.read_csv(MCAdata, header=0, index_col=0)
df2 = df[['x', 'y']]
kmeans = KMeans(n_clusters=k)
kmeans.fit(df2)
labels = kmeans.predict(df2) # same as clusassign = kmeans.fit_predict(X.as_matrix())
df['clusterID'] = labels
# get cluster representatives and centroids
centroids = kmeans.cluster_centers_
min_dist = np.min(cdist(df2.as_matrix(), kmeans.cluster_centers_, 'euclidean'), axis=1)
Y = pd.DataFrame(min_dist, index=df2.index, columns=['Center_euclidean_dist'])
Z = | pd.DataFrame(labels, index=df2.index, columns=['cluster_ID']) | pandas.DataFrame |
import pandas as pd
from astropy.units import earthMass, jupiterMass, earthRad, jupiterRad, solMass, solRad, AU
from .teq_planet import getTeqpl, getTeqpl_error
import numpy as np
from uncertainties import ufloat
def read_file_pandas(csv_file, radius=True):
# Read the CSV file with pandas into a DataFrame,
# keeping only the columns needed for the analysis
dataset = pd.read_csv(csv_file)
if radius is True:
dataset = dataset[['mass', 'mass_error_max', 'semi_major_axis',
'orbital_period', 'eccentricity',
'star_radius', 'star_teff', 'star_mass',
'radius', 'radius_error_max']]
else:
dataset = dataset[['mass', 'mass_error_max', 'semi_major_axis',
'orbital_period', 'eccentricity',
'star_radius', 'star_teff', 'star_mass']]
return dataset
def get_semi_amplitude_k(ecc, m_star, m_p, a, inc):
# Compute the velocity semi amplitude K
# ecc : eccentricity
# m_star : star mass(solar mass)
# m_p : planet mass(jupiter mass)
# a : semi-major axis (AU)
# inc : inclination (radians, as used by np.sin)
# Return k in m.s-1
# -------------------------------------------------------
sqrt_g = 28.4329 # m.s-1
m_p_solar = m_p * jupiterMass.to('solMass')
# Compute the semi_amplitude K
k = ((sqrt_g / np.sqrt(1 - ecc**2)) *
(m_p * np.sin(inc)) *
((m_star + m_p_solar)**(-1/2)) * a**(-1/2))
return abs(k)
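# Worked example (illustrative): for Jupiter orbiting the Sun
# (ecc ~ 0.0489, m_star = 1 M_sun, m_p = 1 M_jup, a ~ 5.2 AU, inc = pi/2)
# this returns roughly 12.5 m/s, the well-known reflex velocity the Sun
# shows due to Jupiter.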
def add_k_dataset(dataset):
# Add the velocity semi amplitude to dataset
k_planet = [get_semi_amplitude_k(ecc, m_star, m_p, a, inc)
for ecc, m_star, m_p, a, inc
in zip(dataset.eccentricity, dataset.star_mass,
dataset.mass, dataset.semi_major_axis,
dataset.inclination)]
dataset.insert(2, 'k', k_planet)
return dataset
def add_temp_eq_dataset(dataset):
semi_major_axis = dataset.semi_major_axis * AU.to('solRad')
teq_planet = [getTeqpl(teff, a/rad, ecc)
for teff, a, rad, ecc,
in zip(dataset.star_teff, semi_major_axis,
dataset.star_radius, dataset.eccentricity)]
dataset.insert(2, 'temp_eq', teq_planet)
return dataset
def add_temp_eq_error_dataset(dataset):
semi_major_axis = dataset.semi_major_axis * AU.to('solRad')
semi_major_axis_error = dataset.semi_major_axis_error * AU.to('solRad')
teq_planet = [getTeqpl_error(ufloat(teff, abs(teff_e)),
ufloat(a, abs(a_e))/ufloat(rad, abs(rad_e)),
ufloat(ecc, abs(ecc_e)))
for teff, teff_e, a, a_e, rad, rad_e, ecc, ecc_e
in zip(dataset.star_teff, dataset.star_teff_error,
semi_major_axis, semi_major_axis_error,
dataset.star_radius, dataset.star_radius_error,
dataset.eccentricity, dataset.eccentricity_error)]
teq_planet_value = [teq.nominal_value for teq in teq_planet]
teq_planet_error = [teq.s for teq in teq_planet]
dataset.insert(2, 'temp_eq_error', teq_planet_error)
dataset.insert(2, 'temp_eq', teq_planet_value)
return dataset
def add_star_luminosity_dataset(dataset):
# Compute the stellar luminosity
# L_star/L_sun = (R_star/R_sun)**2 * (Teff_star / Teff_sun)**4
# Radius star is already expressed in Sun radii in the dataset
# lum_sun = 3.828 * 10**26 # Watt
# radius_sun = 6.95508 * 10**8 # meters
Teff_sun = 5777.0 # Kelvin
L_star = [R_star**2 * (Teff_star / Teff_sun)**4
for R_star, Teff_star
in zip(dataset.star_radius, dataset.star_teff)]
dataset.insert(2, 'star_luminosity', L_star)
return dataset
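# Quick sanity check (hypothetical star): R_star = 1.2 R_sun and Teff = 6000 K give
# L_star = 1.2**2 * (6000 / 5777.0)**4, i.e. about 1.68 L_sun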
def add_star_luminosity_error_dataset(dataset):
# Compute the stellar luminosity
# L_star/L_sun = (R_star/R_sun)**2 * (Teff_star / Teff_sun)**4
# Radius star is already expressed in Sun radii in the dataset
# lum_sun = 3.828 * 10**26 # Watt
# radius_sun = 6.95508 * 10**8 # meters
Teff_sun = 5778 # Kelvin
L_star = [ufloat(R_star, abs(R_star_error))**2 *
(ufloat(Teff_star, abs(Teff_star_error)) / Teff_sun)**4
for R_star, R_star_error, Teff_star, Teff_star_error
in zip(dataset.star_radius, dataset.star_radius_error,
dataset.star_teff, dataset.star_teff_error)]
L_star_value = [ls.nominal_value for ls in L_star]
L_star_error = [ls.s for ls in L_star]
dataset.insert(2, 'star_luminosity_error', L_star_error)
dataset.insert(2, 'star_luminosity', L_star_value)
return dataset
def add_insolation_dataset(dataset):
# Compute the insolation flux
# S / S_earth = (L_star / L_sun) * (AU / a)**2
insolation_earth = 1.37 * 10**3 # Watts/m2
# insolation = [insolation_earth * (l_star * (1 / a))
# for l_star, a
# in zip(dataset.star_luminosity, dataset.semi_major_axis)]
# Insolation expressed in Solar insolation
insolation = [(l_star / a**2)
for l_star, a
in zip(dataset.star_luminosity, dataset.semi_major_axis)]
dataset.insert(2, 'insolation', insolation)
return dataset
def jupiter_to_earth_mass(dataset, column_name):
df = dataset[column_name].apply(lambda x:
(x*jupiterMass).to('earthMass').value)
new_df = pd.DataFrame({column_name: df})
dataset.update(new_df)
return dataset
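# Illustrative conversion (hypothetical call): after
# dataset = jupiter_to_earth_mass(dataset, 'mass')
# a value of 1.0 M_Jup in the 'mass' column becomes roughly 317.8 M_Earth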
def jupiter_to_earth_radius(dataset, column_name):
df = dataset[column_name].apply(lambda x:
(x*jupiterRad).to('earthRad').value)
new_df = pd.DataFrame({column_name: df})
dataset.update(new_df)
return dataset
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# ----------------------------------------------------------------------------
"""
Tests for the Variable Explorer Collections Editor.
"""
# Standard library imports
import os # Example module for testing display inside CollectionsEditor
from os import path
import copy
import datetime
from xml.dom.minidom import parseString
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import numpy
import pandas
import pytest
from flaky import flaky
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget
# Local imports
from spyder.plugins.variableexplorer.widgets.collectionseditor import (
RemoteCollectionsEditorTableView, CollectionsEditorTableView,
CollectionsModel, CollectionsEditor, LARGE_NROWS, ROWS_TO_LOAD)
from spyder.plugins.variableexplorer.widgets.namespacebrowser import (
NamespacesBrowserFinder)
from spyder.plugins.variableexplorer.widgets.tests.test_dataframeeditor import \
generate_pandas_indexes
from spyder.py3compat import PY2
# =============================================================================
# Constants
# =============================================================================
# Full path to this file's parent directory for loading data
LOCATION = path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# =============================================================================
# Utility functions
# =============================================================================
def data(cm, i, j):
return cm.data(cm.index(i, j))
def data_table(cm, n_rows, n_cols):
return [[data(cm, i, j) for i in range(n_rows)] for j in range(n_cols)]
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture
def nonsettable_objects_data():
"""Rturn Python objects with immutable attribs to test CollectionEditor."""
test_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
expected_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
keys_test = [["_typ", "day", "dayofyear", "hour"],
["_typ", "nbytes", "ndim"]]
return zip(test_objs, expected_objs, keys_test)
# =============================================================================
# Tests
# ============================================================================
def test_rename_variable(qtbot):
"""Test renaming of the correct variable."""
variables = {'a': 1,
'b': 2,
'c': 3,
'd': '4',
'e': 5}
editor = CollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model.index(1, 0))
editor.rename_item(new_name='b2')
assert editor.model.rowCount() == 5
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'b2'
assert data(editor.model, 2, 0) == 'c'
assert data(editor.model, 3, 0) == 'd'
assert data(editor.model, 4, 0) == 'e'
# Reset variables and try renaming one again
new_variables = {'a': 1,
'b': 2,
'b2': 2,
'c': 3,
'd': '4',
'e': 5}
editor.set_data(new_variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model.index(1, 0))
editor.rename_item(new_name='b3')
assert editor.model.rowCount() == 6
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'b2'
assert data(editor.model, 2, 0) == 'b3'
assert data(editor.model, 3, 0) == 'c'
assert data(editor.model, 4, 0) == 'd'
assert data(editor.model, 5, 0) == 'e'
def test_remove_variable(qtbot):
"""Test removing of the correct variable."""
variables = {'a': 1,
'b': 2,
'c': 3,
'd': '4',
'e': 5}
editor = CollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model.index(1, 0))
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
# Reset variables and try removing one again
editor.set_data(variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model.index(1, 0))
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
def test_remove_remote_variable(qtbot, monkeypatch):
"""Test the removing of the correct remote variable."""
variables = {'a': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '1'},
'b': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '2'},
'c': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '3'},
'd': {'type': 'str',
'size': 1, 'color': '#800000',
'view': '4'},
'e': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '5'}}
editor = RemoteCollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model.index(1, 0))
# Monkey patch remove variables
def remove_values(ins, names):
assert names == ['b']
data = {'a': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '1'},
'c': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '3'},
'd': {'type': 'str',
'size': 1, 'color': '#800000',
'view': '4'},
'e': {'type': 'int',
'size': 1,
'color': '#0000ff',
'view': '5'}}
editor.set_data(data)
monkeypatch.setattr(
'spyder.plugins.variableexplorer.widgets'
'.collectionseditor.RemoteCollectionsEditorTableView.remove_values',
remove_values)
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
# Reset variables and try removing one again
editor.set_data(variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model.index(1, 0))
editor.remove_item(force=True)
assert editor.model.rowCount() == 4
assert data(editor.model, 0, 0) == 'a'
assert data(editor.model, 1, 0) == 'c'
assert data(editor.model, 2, 0) == 'd'
assert data(editor.model, 3, 0) == 'e'
def test_filter_rows(qtbot):
"""Test rows filtering."""
df = pandas.DataFrame(['foo', 'bar'])
editor = CollectionsEditorTableView(None, {'dfa': df, 'dfb': df})
editor.finder = NamespacesBrowserFinder(editor,
editor.set_regex)
qtbot.addWidget(editor)
# Initially two rows
assert editor.model.rowCount() == 2
# Match two rows by name
editor.finder.setText("df")
assert editor.model.rowCount() == 2
# Match two rows by type
editor.finder.setText("DataFrame")
assert editor.model.rowCount() == 2
# Only one match
editor.finder.setText("dfb")
assert editor.model.rowCount() == 1
# No match
editor.finder.setText("dfbc")
assert editor.model.rowCount() == 0
def test_create_dataframeeditor_with_correct_format(qtbot, monkeypatch):
MockDataFrameEditor = Mock()
mockDataFrameEditor_instance = MockDataFrameEditor()
monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',
MockDataFrameEditor)
df = pandas.DataFrame(['foo', 'bar'])
editor = CollectionsEditorTableView(None, {'df': df})
qtbot.addWidget(editor)
editor.set_dataframe_format('%10d')
editor.delegate.createEditor(None, None, editor.model.index(0, 3))
mockDataFrameEditor_instance.dataModel.set_format.assert_called_once_with('%10d')
def test_accept_sig_option_changed_from_dataframeeditor(qtbot, monkeypatch):
df = pandas.DataFrame(['foo', 'bar'])
editor = CollectionsEditorTableView(None, {'df': df})
qtbot.addWidget(editor)
editor.set_dataframe_format('%10d')
assert editor.source_model.dataframe_format == '%10d'
editor.delegate.createEditor(None, None, editor.model.index(0, 3))
dataframe_editor = next(iter(editor.delegate._editors.values()))['editor']
qtbot.addWidget(dataframe_editor)
dataframe_editor.sig_option_changed.emit('dataframe_format', '%5f')
assert editor.source_model.dataframe_format == '%5f'
def test_collectionsmodel_with_two_ints():
coll = {'x': 1, 'y': 2}
cm = CollectionsModel(None, coll)
assert cm.rowCount() == 2
assert cm.columnCount() == 5
# dict is unordered, so first row might be x or y
assert data(cm, 0, 0) in {'x',
'y'}
if data(cm, 0, 0) == 'x':
row_with_x = 0
row_with_y = 1
else:
row_with_x = 1
row_with_y = 0
assert data(cm, row_with_x, 1) == 'int'
assert data(cm, row_with_x, 2) == 1
assert data(cm, row_with_x, 3) == '1'
assert data(cm, row_with_y, 0) == 'y'
assert data(cm, row_with_y, 1) == 'int'
assert data(cm, row_with_y, 2) == 1
assert data(cm, row_with_y, 3) == '2'
def test_collectionsmodel_with_index():
# Regression test for spyder-ide/spyder#3380,
# modified for spyder-ide/spyder#3758.
for rng_name, rng in generate_pandas_indexes().items():
coll = {'rng': rng}
cm = CollectionsModel(None, coll)
assert data(cm, 0, 0) == 'rng'
assert data(cm, 0, 1) == rng_name
assert data(cm, 0, 2) == '(20,)' or data(cm, 0, 2) == '(20L,)'
try:
assert data(cm, 0, 3) == rng._summary()
except AttributeError:
assert data(cm, 0, 3) == rng.summary()
def test_shows_dataframeeditor_when_editing_index(qtbot, monkeypatch):
for rng_name, rng in generate_pandas_indexes().items():
MockDataFrameEditor = Mock()
mockDataFrameEditor_instance = MockDataFrameEditor()
monkeypatch.setattr('spyder.plugins.variableexplorer.widgets.collectionsdelegate.DataFrameEditor',
MockDataFrameEditor)
coll = {'rng': rng}
editor = CollectionsEditorTableView(None, coll)
editor.delegate.createEditor(None, None,
editor.model.index(0, 3))
mockDataFrameEditor_instance.show.assert_called_once_with()
@pytest.mark.skipif(os.name == 'nt' and PY2, reason='Fails on Win and py2')
def test_sort_collectionsmodel():
var_list1 = [0, 1, 2]
var_list2 = [3, 4, 5, 6]
var_dataframe1 = pandas.DataFrame([[1, 2, 3], [20, 30, 40], [2, 2, 2]])
var_dataframe2 = pandas.DataFrame([[1, 2, 3], [20, 30, 40]])
var_series1 = pandas.Series(var_list1)
var_series2 = pandas.Series(var_list2)
""""
Written by <NAME>
This script creates supervised testing data sets with the following format:
Observed input data: gwl(t-lag)...gwl(t), rain(t-lag)...rain(t), tide(t-lag)...tide(t)
Forecast input data: rain(t+1)...rain(t+18), tide(t+1)...tide(t+18)
Label data is still gwl(t+1)...gwl(t+18)
"""
import pandas as pd
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
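# Illustrative (commented-out) example of the column naming, using made-up data:
# series_to_supervised([10, 20, 30, 40], n_in=2, n_out=1) yields columns
# ['var1(t-2)', 'var1(t-1)', 'var1(t)'], with the first two rows dropped by dropna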
obs_data_path = "C:/Users/<NAME>/Documents/HRSD GIS/Site Data/Data_2010_2018/"
hrrr_dir = "C:/HRRR/"
fcst_path = "C:/Users/<NAME>/Documents/HRSD GIS/Site Data/Forecast_data/"
# indicate which well to use
well_list = ["043", "125", "129", "153", "155", "170", "175"]
# well_list = ["043"]
n_ahead = 19
# read tide data
tide_2016 = pd.read_csv(fcst_path + "forecast_tide_raw_sept2016.csv", index_col="Date Time", parse_dates=True,
infer_datetime_format=True)
tide_2017 = pd.read_csv(fcst_path + "forecast_tide_raw_jan2017.csv", index_col="Date Time", parse_dates=True,
infer_datetime_format=True)
tide_2018 = pd.read_csv(fcst_path + "forecast_tide_raw_may2018.csv", index_col="Date Time", parse_dates=True,
infer_datetime_format=True)
# convert tide to supervised learning format and combine
tide2016_super = series_to_supervised(tide_2016, 0, n_ahead)
tide2016_super = tide2016_super.drop('var1(t)', axis=1)
tide2017_super = series_to_supervised(tide_2017, 0, n_ahead)
tide2017_super = tide2017_super.drop('var1(t)', axis=1)
tide2018_super = series_to_supervised(tide_2018, 0, n_ahead)
tide2018_super = tide2018_super.drop('var1(t)', axis=1)
forecast_tide_super = pd.concat([tide2016_super, tide2017_super, tide2018_super], axis=0)
for well in well_list:
# lag and forecast values
if well == "043":
n_lag = 26
rain_name = "Precip.Avg"
if well == "125":
n_lag = 26
rain_name = "Precip."
if well == "129":
n_lag = 59
rain_name = "Precip."
if well == "153":
n_lag = 25
rain_name = "Precip.Avg"
if well == "155":
n_lag = 28
rain_name = "Precip.Avg"
if well == "170":
n_lag = 48
rain_name = "Precip."
if well == "175":
n_lag = 58
rain_name = "Precip."
# load observed and hrrr datasets
obs_data = pd.read_csv(obs_data_path + "MMPS_" + well + "_no_blanks_SI.csv", parse_dates=True,
infer_datetime_format=True)
hrrr_data = pd.read_csv(hrrr_dir + "mmps" + well + "_hrrr.csv", index_col="Datetime", parse_dates=True,
infer_datetime_format=True)
# format obs data as supervised learning problem
gwl_super = series_to_supervised(obs_data[["GWL"]], n_lag, n_ahead)
gwl_cols = []
for col in gwl_super.columns:
col_name = "gwl(" + str(col).split("(")[1]
gwl_cols.append(col_name)
gwl_super.columns = gwl_cols
gwl_dates = obs_data[obs_data.index.isin(gwl_super.index)]
gwl_dates = gwl_dates[["Datetime"]]
gwl_with_dates = pd.concat([gwl_dates, gwl_super], axis=1, sort=False)
import time
import sys
import os
import logging
print(sys.path)
logging.basicConfig(level=logging.DEBUG)
def test_lightgbm_gpu():
import numpy as np
import pandas as pd
from h2o4gpu.util.lightgbm_dynamic import got_cpu_lgb, got_gpu_lgb
import lightgbm as lgb
X1 = np.repeat(np.arange(10), 1000)
X2 = np.repeat(np.arange(10), 1000)
np.random.shuffle(X2)
y = (X1 + np.random.randn(10000)) * (X2 + np.random.randn(10000))
data = pd.DataFrame({'y': y, 'X1': X1, 'X2': X2})
lgb_params = {'learning_rate' : 0.1,
'boosting' : 'dart',
'objective' : 'regression',
'metric' : 'rmse',
'feature_fraction' : 0.9,
'bagging_fraction' : 0.75,
'num_leaves' : 31,
'bagging_freq' : 1,
'min_data_per_leaf': 250, 'device_type': 'gpu', 'gpu_device_id': 0}
lgb_train = lgb.Dataset(data=data[['X1', 'X2']], label=data.y)
cv = lgb.cv(lgb_params,
lgb_train,
num_boost_round=100,
early_stopping_rounds=15,
stratified=False,
verbose_eval=50)
def test_lightgbm_cpu():
import numpy as np
import pandas as pd
from h2o4gpu.util.lightgbm_dynamic import got_cpu_lgb, got_gpu_lgb
import lightgbm as lgb
X1 = np.repeat(np.arange(10), 1000)
X2 = np.repeat(np.arange(10), 1000)
np.random.shuffle(X2)
y = (X1 + np.random.randn(10000)) * (X2 + np.random.randn(10000))
data = pd.DataFrame({'y': y, 'X1': X1, 'X2': X2})
"""
To extract compile time and runtime data from evo-suite dataset
Version 0.3.0
- Project metric computation has been omitted.
To be used in CodART project
"""
import multiprocessing
import sys
import os
import subprocess
import threading
from collections import Counter
from functools import wraps
import warnings
from deprecated import deprecated
import re
import math
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy import stats
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_classif
from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
from imblearn.combine import SMOTEENN, SMOTETomek
from imblearn.over_sampling import SMOTE, ADASYN
# https://scitools.com/support/python-api/
# Python 3.8 and newer require the user to add a call to os.add_dll_directory("SciTools/bin/")
# os.add_dll_directory('C:/Program Files/SciTools/bin/pc-win64')
sys.path.insert(0, 'D:/program files/scitools/bin/pc-win64/python')
try:
import understand
except ModuleNotFoundError:
# The Understand Python API may not be installed; code paths that need it will fail when called
pass
from . import metrics_names
from naming import UnderstandUtility
from metrics.metrics_jcode_odor import JCodeOdorMetric
from metrics.source_code_metrics import *
import metrics.metrics_names
__version__ = '0.4.0'
__author__ = 'Morteza'
def check_compute_metrics_by_class_list(project_name: str = None, database=None, class_list=None,
csv_path=None):
class_entities = PreProcess.read_project_classes(project_name=project_name, db=database, df=class_list, )
print('Number of classes in {0}: {1}'.format(project_name, len(class_entities)))
columns = ['Project', 'NumberOfClass']
columns.extend(TestabilityMetrics.get_all_metrics_names())
dummy_data = [0 for i in range(0, len(columns) - 2)]
dummy_data.insert(0, project_name)
dummy_data.insert(1, len(class_entities))
df = pd.DataFrame(data=[dummy_data], columns=columns)
# print(df)
# print(columns)
df.to_csv(csv_path + project_name + '.csv', index=False, )
class TestabilityMetrics:
"""
"""
@classmethod
def get_class_ordinary_metrics_names(cls) -> list:
return metrics_names.class_ordinary_metrics_names
@classmethod
def get_class_lexicon_metrics_names(cls) -> list:
return metrics_names.class_lexicon_metrics_names
@classmethod
def get_package_metrics_names(cls) -> list:
return metrics_names.package_metrics_names
@classmethod
def get_project_metrics_names(cls) -> list:
return metrics_names.project_metrics_names
@classmethod
def get_all_metrics_names(cls) -> list:
metrics = list()
# print('project_metrics number: ', len(TestabilityMetrics.get_project_metrics_names()))
# for metric_name in TestabilityMetrics.get_project_metrics_names():
# metrics.append('PJ_' + metric_name)
# print('package_metrics number: ', len(TestabilityMetrics.get_package_metrics_names()))
for metric_name in TestabilityMetrics.get_package_metrics_names():
metrics.append('PK_' + metric_name)
# SOOTI is now corrected.
# print('class_lexicon_metrics number: ', len(TestabilityMetrics.get_class_lexicon_metrics_names()))
for metric_name in TestabilityMetrics.get_class_lexicon_metrics_names():
metrics.append('CSLEX_' + metric_name)
# print('class_ordinary_metrics number: ', len(TestabilityMetrics.get_class_ordinary_metrics_names()))
for metric_name in TestabilityMetrics.get_class_ordinary_metrics_names():
metrics.append('CSORD_' + metric_name)
# print('All available metrics: {0}'.format(len(metrics)))
return metrics
@classmethod
def get_all_primary_metrics_names(cls) -> list:
primary_metrics_names = list()
for metric_name in metrics_names.project_metrics_names_primary:
primary_metrics_names.append('PJ_' + metric_name)
for metric_name in metrics_names.package_metrics_names_primary:
primary_metrics_names.append('PK_' + metric_name)
for metric_name in metrics_names.class_ordinary_metrics_names_primary:
primary_metrics_names.append('CSORD_' + metric_name)
for metric_name in metrics_names.class_lexicon_metrics_names:
primary_metrics_names.append('CSLEX_' + metric_name)
return primary_metrics_names
@classmethod
def compute_java_class_metrics2(cls, db=None, entity=None):
"""
Strategy #2: Take a list of all classes and search for target class
Which strategy is used for our final setting? I do not know!
:param db:
:param entity:
:return:
"""
# 1. Understand built-in class metrics
class_metrics = entity.metric(entity.metrics())
# print('number of metrics for class "{0}": {1}, and metrics: {2}'.format(entity.longname(),
# len(class_metrics), class_metrics), )
# for i, metric in enumerate(class_metrics.keys()):
# print(i + 1, ': ', metric, class_metrics[metric])
# print(class_metrics['AvgCyclomatic'])
# 2. Systematically created metrics
j_code_odor_metric = JCodeOdorMetric()
method_list = UnderstandUtility.get_method_of_class_java2(db=db, class_name=entity.longname())
if method_list is None:
raise TypeError('method_list is none for class "{}"'.format(entity.longname()))
# 2.1 CSCC
class_cyclomatic_list = list()
class_cyclomatic_namm_list = list()
class_cyclomatic_strict_list = list()
class_cyclomatic_strict_namm_list = list()
class_cyclomatic_modified_list = list()
class_cyclomatic_modified_namm_list = list()
class_essential_list = list()
class_essential_namm_list = list()
for method in method_list:
class_cyclomatic_list.append(method.metric(['Cyclomatic'])['Cyclomatic'])
class_cyclomatic_strict_list.append(method.metric(['CyclomaticStrict'])['CyclomaticStrict'])
class_cyclomatic_modified_list.append(method.metric(['CyclomaticModified'])['CyclomaticModified'])
class_essential_list.append(method.metric(['Essential'])['Essential'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
class_cyclomatic_namm_list.append(method.metric(['Cyclomatic'])['Cyclomatic'])
class_cyclomatic_strict_namm_list.append(method.metric(['CyclomaticStrict'])['CyclomaticStrict'])
class_cyclomatic_modified_namm_list.append(method.metric(['CyclomaticModified'])['CyclomaticModified'])
class_essential_namm_list.append(method.metric(['Essential'])['Essential'])
cls.remove_none_from_lists([class_cyclomatic_list, class_cyclomatic_namm_list,
class_cyclomatic_strict_list, class_cyclomatic_strict_namm_list,
class_cyclomatic_modified_list, class_cyclomatic_modified_namm_list,
class_essential_list, class_essential_namm_list])
# CSCC
# 2.1.13
class_metrics.update({'MinCyclomatic': min(class_cyclomatic_list)})
# 2.1.14
class_metrics.update({'MinCyclomaticStrict': min(class_cyclomatic_strict_list)})
# 2.1.15
class_metrics.update({'MinCyclomaticModified': min(class_cyclomatic_modified_list)})
# 2.1.16
class_metrics.update({'MinEssential': min(class_essential_list)})
# 2.1.17
class_metrics.update({'SDCyclomatic': np.std(class_cyclomatic_list)})
# 2.1.18
class_metrics.update({'SDCyclomaticStrict': np.std(class_cyclomatic_strict_list)})
# 2.1.19
class_metrics.update({'SDCyclomaticModified': np.std(class_cyclomatic_modified_list)})
# 2.1.20
class_metrics.update({'SDEssential': np.std(class_essential_list)})
class_metrics.update({'LogCyclomatic': math.log10(sum(class_cyclomatic_list) + 1)})
class_metrics.update({'LogCyclomaticStrict': math.log10(sum(class_cyclomatic_strict_list) + 1)})
class_metrics.update({'LogCyclomaticModified': math.log10(sum(class_cyclomatic_modified_list) + 1)})
class_metrics.update({'LogEssential': math.log10(sum(class_essential_list) + 1)})
# CSCCNAMM
# 2.1.21
class_metrics.update({'SumCyclomaticNAMM': sum(class_cyclomatic_namm_list)})
# 2.1.22
class_metrics.update({'SumCyclomaticStrictNAMM': sum(class_cyclomatic_strict_namm_list)})
# 2.1.23
class_metrics.update({'SumCyclomaticModifiedNAMM': sum(class_cyclomatic_modified_namm_list)})
# 2.1.24
class_metrics.update({'SumEssentialNAMM': sum(class_essential_namm_list)})
# 2.1.25
class_metrics.update({'MaxCyclomaticNAMM': max(class_cyclomatic_namm_list)})
# 2.1.26
class_metrics.update({'MaxCyclomaticStrictNAMM': max(class_cyclomatic_strict_namm_list)})
# 2.1.27
class_metrics.update({'MaxCyclomaticModifiedNAMM': max(class_cyclomatic_modified_namm_list)})
# 2.1.28
class_metrics.update({'MaxEssentialNAMM': max(class_essential_namm_list)})
# 2.1.29
class_metrics.update({'AvgCyclomaticNAMM': sum(class_cyclomatic_namm_list) / len(class_cyclomatic_namm_list)})
# 2.1.30
class_metrics.update({'AvgCyclomaticStrictNAMM': sum(class_cyclomatic_strict_namm_list) / len(
class_cyclomatic_strict_namm_list)})
# 2.1.31
class_metrics.update({'AvgCyclomaticModifiedNAMM': sum(class_cyclomatic_modified_namm_list) / len(
class_cyclomatic_modified_namm_list)})
# 2.1.32
class_metrics.update({'AvgEssentialNAMM': sum(class_essential_namm_list) / len(class_essential_namm_list)})
# 2.1.33
class_metrics.update({'MinCyclomaticNAMM': min(class_cyclomatic_namm_list)})
# 2.1.34
class_metrics.update({'MinCyclomaticStrictNAMM': min(class_cyclomatic_strict_namm_list)})
# 2.1.35
class_metrics.update({'MinCyclomaticModifiedNAMM': min(class_cyclomatic_modified_namm_list)})
# 2.1.36
class_metrics.update({'MinEssentialNAMM': min(class_essential_namm_list)})
# 2.1.37
class_metrics.update({'SDCyclomaticNAMM': np.std(class_cyclomatic_namm_list)})
# 2.1.38
class_metrics.update({'SDCyclomaticStrictNAMM': np.std(class_cyclomatic_strict_namm_list)})
# 2.1.39
class_metrics.update({'SDCyclomaticModifiedNAMM': np.std(class_cyclomatic_modified_namm_list)})
# 2.1.40
class_metrics.update({'SDEssentialNAMM': np.std(class_essential_namm_list)})
# 2.2 CSNOP (10)
#
parameters_length_list = list()
parameters_length_namm_list = list()
# number_of_parameters = 0
# print('method list', len(method_list))
for method in method_list:
# if method.library() != "Standard":
# print('method params', method.longname(), '-->', method.parameters())
params = method.parameters().split(',')
if len(params) == 1:
if params[0] == ' ' or params[0] == '' or params[0] is None:
parameters_length_list.append(0)
else:
parameters_length_list.append(1)
else:
parameters_length_list.append(len(params))
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
if len(params) == 1:
if params[0] == ' ' or params[0] == '' or params[0] is None:
parameters_length_namm_list.append(0)
else:
parameters_length_namm_list.append(1)
else:
parameters_length_namm_list.append(len(params))
cls.remove_none_from_lists([parameters_length_list, parameters_length_namm_list])
# print('number of parameters', number_of_parameters)
# CSNOP
# 2.2.1
class_metrics.update({'SumCSNOP': sum(parameters_length_list)})
# 2.2.2
class_metrics.update({'MaxCSNOP': max(parameters_length_list)})
# 2.2.3
class_metrics.update({'MinCSNOP': min(parameters_length_list)})
# 2.2.4
class_metrics.update({'AvgCSNOP': sum(parameters_length_list) / len(parameters_length_list)})
# 2.2.5
class_metrics.update({'SDCSNOP': np.std(parameters_length_list)})
# CSNOP_NAMM
# 2.2.6
class_metrics.update({'SumCSNOPNAMM': sum(parameters_length_namm_list)})
# 2.2.7
class_metrics.update({'MaxCSNOPNAMM': max(parameters_length_namm_list)})
# 2.2.8
class_metrics.update({'MinCSNOPNAMM': min(parameters_length_namm_list)})
# 2.2.9
class_metrics.update({'AvgCSNOPNAMM': sum(parameters_length_namm_list) / len(parameters_length_namm_list)})
# 2.2.10
class_metrics.update({'SDCSNOPNAMM': np.std(parameters_length_namm_list)})
# 2.3 SCLOC (30)
#
line_of_code_list = list()
line_of_code_namm_list = list()
line_of_code_decl_list = list()
line_of_code_decl_namm_list = list()
line_of_code_exe_list = list()
line_of_code_exe_namm_list = list()
for method in method_list:
line_of_code_list.append(method.metric(['CountLineCode'])['CountLineCode'])
line_of_code_decl_list.append(method.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
line_of_code_exe_list.append(method.metric(['CountLineCodeExe'])['CountLineCodeExe'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
line_of_code_namm_list.append(method.metric(['CountLineCode'])['CountLineCode'])
line_of_code_decl_namm_list.append(method.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
line_of_code_exe_namm_list.append(method.metric(['CountLineCodeExe'])['CountLineCodeExe'])
cls.remove_none_from_lists([line_of_code_list, line_of_code_namm_list,
line_of_code_decl_list, line_of_code_decl_namm_list,
line_of_code_exe_list, line_of_code_exe_namm_list])
# CSLOC_All
# 2.3.5
class_metrics.update({'AvgLineCodeDecl': sum(line_of_code_decl_list) / len(line_of_code_decl_list)})
# 2.3.6
class_metrics.update({'AvgLineCodeExe': sum(line_of_code_exe_list) / len(line_of_code_exe_list)})
# 2.3.7
class_metrics.update({'MaxLineCode': max(line_of_code_list)})
# 2.3.8
class_metrics.update({'MaxLineCodeDecl': max(line_of_code_decl_list)})
# 2.3.9
class_metrics.update({'MaxLineCodeExe': max(line_of_code_exe_list)})
# 2.3.10
class_metrics.update({'MinLineCode': min(line_of_code_list)})
# 2.3.11
class_metrics.update({'MinLineCodeDecl': min(line_of_code_decl_list)})
# 2.3.12
class_metrics.update({'MinLineCodeExe': min(line_of_code_exe_list)})
# 2.3.13
class_metrics.update({'SDLineCode': np.std(line_of_code_list)})
# 2.3.14
class_metrics.update({'SDLineCodeDecl': np.std(line_of_code_decl_list)})
# 2.3.15
class_metrics.update({'SDLineCodeExe': np.std(line_of_code_exe_list)})
class_metrics.update({'LogLineCode': math.log10(sum(line_of_code_list) + 1)})
class_metrics.update({'LogLineCodeDecl': math.log10(sum(line_of_code_decl_list) + 1)})
class_metrics.update({'LogLineCodeExe': math.log10(sum(line_of_code_exe_list) + 1)})
# CSLOC_NAMM
# 2.3.16
class_metrics.update({'CountLineCodeNAMM': sum(line_of_code_namm_list)})
# 2.3.17
class_metrics.update({'CountLineCodeDeclNAMM': sum(line_of_code_decl_namm_list)})
# print('!@#', sum(line_of_code_decl_namm_list))
# quit()
# 2.3.18
class_metrics.update({'CountLineCodeExeNAMM': sum(line_of_code_exe_namm_list)})
# 2.3.19
class_metrics.update({'AvgLineCodeNAMM': sum(line_of_code_namm_list) / len(line_of_code_namm_list)})
# 2.3.20
class_metrics.update(
{'AvgLineCodeDeclNAMM': sum(line_of_code_decl_namm_list) / len(line_of_code_decl_namm_list)})
# 2.3.21
class_metrics.update({'AvgLineCodeExeNAMM': sum(line_of_code_exe_namm_list) / len(line_of_code_exe_namm_list)})
# 2.3.22
class_metrics.update({'MaxLineCodeNAMM': max(line_of_code_namm_list)})
# 2.3.23
class_metrics.update({'MaxLineCodeDeclNAMM': max(line_of_code_decl_namm_list)})
# 2.3.24
class_metrics.update({'MaxLineCodeExeNAMM': max(line_of_code_exe_namm_list)})
# 2.3.25
class_metrics.update({'MinLineCodeNAMM': min(line_of_code_namm_list)})
# 2.3.26
class_metrics.update({'MinLineCodeDeclNAMM': min(line_of_code_decl_namm_list)})
# 2.3.27
class_metrics.update({'MinLineCodeExeNAMM': min(line_of_code_exe_namm_list)})
# 2.3.28
class_metrics.update({'SDLineCodeNAMM': np.std(line_of_code_namm_list)})
# 2.3.29
class_metrics.update({'SDLineCodeDeclNAMM': np.std(line_of_code_decl_namm_list)})
# print('!@#', np.std(line_of_code_decl_namm_list))
# quit()
# 2.3.30
class_metrics.update({'SDLineCodeExeNAMM': np.std(line_of_code_exe_namm_list)})
# ----------------------------------------------------------------
# 2.4 CSNOST (3-->30)
# To be completed in future work
number_of_stmt_list = list()
number_of_stmt_namm_list = list()
number_of_stmt_decl_list = list()
number_of_stmt_decl_namm_list = list()
number_of_stmt_exe_list = list()
number_of_stmt_exe_namm_list = list()
for method in method_list:
number_of_stmt_list.append(method.metric(['CountStmt'])['CountStmt'])
number_of_stmt_decl_list.append(method.metric(['CountStmtDecl'])['CountStmtDecl'])
number_of_stmt_exe_list.append(method.metric(['CountStmtExe'])['CountStmtExe'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
number_of_stmt_namm_list.append(method.metric(['CountStmt'])['CountStmt'])
number_of_stmt_decl_namm_list.append(method.metric(['CountStmtDecl'])['CountStmtDecl'])
number_of_stmt_exe_namm_list.append(method.metric(['CountStmtExe'])['CountStmtExe'])
cls.remove_none_from_lists([number_of_stmt_list, number_of_stmt_namm_list,
number_of_stmt_decl_list, number_of_stmt_decl_namm_list,
number_of_stmt_exe_list, number_of_stmt_exe_namm_list])
# CSNOST_All
# 2.4.4
class_metrics.update({'AvgStmt': sum(number_of_stmt_list) / len(number_of_stmt_list)})
# 2.4.5
class_metrics.update({'AvgStmtDecl': sum(number_of_stmt_decl_list) / len(number_of_stmt_decl_list)})
# 2.4.6
class_metrics.update({'AvgStmtExe': sum(number_of_stmt_exe_list) / len(number_of_stmt_exe_list)})
# 2.4.7
class_metrics.update({'MaxStmt': max(number_of_stmt_list)})
# 2.4.8
class_metrics.update({'MaxStmtDecl': max(number_of_stmt_decl_list)})
# 2.4.9
class_metrics.update({'MaxStmtExe': max(number_of_stmt_exe_list)})
# 2.4.10
class_metrics.update({'MinStmt': min(number_of_stmt_list)})
# 2.4.11
class_metrics.update({'MinStmtDecl': min(number_of_stmt_decl_list)})
# 2.4.12
class_metrics.update({'MinStmtExe': min(number_of_stmt_exe_list)})
# 2.4.13
class_metrics.update({'SDStmt': np.std(number_of_stmt_list)})
# 2.4.14
class_metrics.update({'SDStmtDecl': np.std(number_of_stmt_decl_list)})
# 2.4.15
class_metrics.update({'SDStmtExe': np.std(number_of_stmt_exe_list)})
class_metrics.update({'LogStmt': math.log10(sum(number_of_stmt_list) + 1)})
class_metrics.update({'LogStmtDecl': math.log10(sum(number_of_stmt_decl_list) + 1)})
class_metrics.update({'LogStmtExe': math.log10(sum(number_of_stmt_exe_list) + 1)})
# CSNOST_NAMM
# 2.4.16
class_metrics.update({'CountStmtNAMM': sum(number_of_stmt_namm_list)})
# 2.4.17
class_metrics.update({'CountStmtDeclNAMM': sum(number_of_stmt_decl_namm_list)})
# 2.4.18
class_metrics.update({'CountStmtExeNAMM': sum(number_of_stmt_exe_namm_list)})
# 2.4.19
class_metrics.update({'AvgStmtNAMM': sum(number_of_stmt_namm_list) / len(number_of_stmt_namm_list)})
# 2.4.20
class_metrics.update(
{'AvgStmtDeclNAMM': sum(number_of_stmt_decl_namm_list) / len(number_of_stmt_decl_namm_list)})
# 2.4.21
class_metrics.update({'AvgStmtExeNAMM': sum(number_of_stmt_exe_namm_list) / len(number_of_stmt_exe_namm_list)})
# 2.4.22
class_metrics.update({'MaxStmtNAMM': max(number_of_stmt_namm_list)})
# 2.4.23
class_metrics.update({'MaxStmtDeclNAMM': max(number_of_stmt_decl_namm_list)})
# 2.4.24
class_metrics.update({'MaxStmtExeNAMM': max(number_of_stmt_exe_namm_list)})
# 2.4.25
class_metrics.update({'MinStmtNAMM': min(number_of_stmt_namm_list)})
# 2.4.26
class_metrics.update({'MinStmtDeclNAMM': min(number_of_stmt_decl_namm_list)})
# 2.4.27
class_metrics.update({'MinStmtExeNAMM': min(number_of_stmt_exe_namm_list)})
# 2.4.28
class_metrics.update({'SDStmtNAMM': np.std(number_of_stmt_namm_list)})
# 2.4.29
class_metrics.update({'SDStmtDeclNAMM': np.std(number_of_stmt_decl_namm_list)})
# 2.4.30
class_metrics.update({'SDStmtExeNAMM': np.std(number_of_stmt_exe_namm_list)})
# Class number of not accessor or mutator methods
# Class max_nesting (4)
CSNOMNAMM = 0
max_nesting_list = list()
for method in method_list:
max_nesting_list.append(method.metric(['MaxNesting'])['MaxNesting'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
CSNOMNAMM += 1
cls.remove_none_from_lists([max_nesting_list])
class_metrics.update({'CSNOMNAMM': CSNOMNAMM})
class_metrics.update({'MinNesting': min(max_nesting_list)})
class_metrics.update({'AvgNesting': sum(max_nesting_list) / len(max_nesting_list)})
class_metrics.update({'SDNesting': np.std(max_nesting_list)})
# Custom (JCodeOdor) coupling metrics
class_metrics.update({'RFC': j_code_odor_metric.RFC(class_name=entity)})
class_metrics.update({'FANIN': j_code_odor_metric.FANIN(db=db, class_entity=entity)})
class_metrics.update({'FANOUT': j_code_odor_metric.FANOUT(db=db, class_entity=entity)})
class_metrics.update({'ATFD': UnderstandUtility.ATFD(db=db, class_entity=entity)})  # Not implemented
class_metrics.update({'CFNAMM': j_code_odor_metric.CFNAMM_Class(class_name=entity)})
class_metrics.update({'DAC': UnderstandUtility.get_data_abstraction_coupling(db=db, class_entity=entity)})
class_metrics.update({'NumberOfMethodCalls': UnderstandUtility.number_of_method_call(class_entity=entity)})
# Visibility metrics
# Understand built-in metrics plus one custom metric.
class_metrics.update({'CSNOAMM': j_code_odor_metric.NOMAMM(class_entity=entity)})
# Inheritance metrics
class_metrics.update({'NIM': j_code_odor_metric.NIM(class_name=entity)})
class_metrics.update({'NMO': j_code_odor_metric.NMO(class_name=entity)})
class_metrics.update({'NOII': UnderstandUtility.NOII(db=db)}) # Not implemented
# ---------------------------------------
# New added metric (version 0.3.0, dataset 0.5.0)
class_count_path_list = list()
class_count_path_log_list = list()
class_knots_list = list()
for method in method_list:
class_count_path_list.append(method.metric(['CountPath'])['CountPath'])
class_count_path_log_list.append(method.metric(['CountPathLog'])['CountPathLog'])
class_knots_list.append(method.metric(['Knots'])['Knots'])
cls.remove_none_from_lists([class_count_path_list, class_count_path_log_list, class_knots_list])
class_metrics.update({'SumCountPath': sum(class_count_path_list)})
class_metrics.update({'MinCountPath': min(class_count_path_list)})
class_metrics.update({'MaxCountPath': max(class_count_path_list)})
class_metrics.update({'AvgCountPath': sum(class_count_path_list) / len(class_count_path_list)})
class_metrics.update({'SDCountPath': np.std(class_count_path_list)})
class_metrics.update({'SumCountPathLog': sum(class_count_path_log_list)})
class_metrics.update({'MinCountPathLog': min(class_count_path_log_list)})
class_metrics.update({'MaxCountPathLog': max(class_count_path_log_list)})
class_metrics.update({'AvgCountPathLog': sum(class_count_path_log_list) / len(class_count_path_log_list)})
class_metrics.update({'SDCountPathLog': np.std(class_count_path_log_list)})
class_metrics.update({'SumKnots': sum(class_knots_list)})
class_metrics.update({'MinKnots': min(class_knots_list)})
class_metrics.update({'MaxKnots': max(class_knots_list)})
class_metrics.update({'AvgKnots': sum(class_knots_list) / len(class_knots_list)})
class_metrics.update({'SDKnots': np.std(class_knots_list)})
constructor = UnderstandUtility.get_constructor_of_class_java(db=db, class_name=entity.longname())
class_metrics.update({'NumberOfClassConstructors': len(constructor)})
class_metrics.update({'NumberOfDepends': len(entity.depends())})
class_metrics.update({'NumberOfDependsBy': len(entity.dependsby())})
class_metrics.update({'NumberOfClassInItsFile': len(
UnderstandUtility.get_number_of_class_in_file_java(db=db, class_entity=entity))})
return class_metrics
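# Illustrative (commented-out) usage; the .udb path and class name below are placeholders:
# db = understand.open('sf110_without_test/myproject.udb')
# entity = UnderstandUtility.get_class_entity_by_name(db=db, class_name='com.example.Foo')
# metrics = TestabilityMetrics.compute_java_class_metrics2(db=db, entity=entity)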
@classmethod
def compute_java_class_metrics_lexicon(cls, db=None, entity=None):
"""
:param db:
:param entity:
:return:
"""
class_lexicon_metrics_dict = dict()
# for ib in entity.ib():
# print('entity ib', ib)
# Compute lexicons
tokens_list = list()
identifiers_list = list()
keywords_list = list()
operators_list = list()
return_and_print_count = 0
return_and_print_kw_list = ['return', 'print', 'printf', 'println', 'write', 'writeln']
condition_count = 0
condition_kw_list = ['if', 'for', 'while', 'switch', '?', 'assert', ]
uncondition_count = 0
uncondition_kw_list = ['break', 'continue', ]
exception_count = 0
exception_kw_list = ['try', 'catch', 'throw', 'throws', 'finally', ]
new_count = 0
new_count_kw_list = ['new']
super_count = 0
super_count_kw_list = ['super']
dots_count = 0
try:
# print('ec', entity.parent().id())
# source_file_entity = db.ent_from_id(entity.parent().id())
# print('file', type(source_file_entity), source_file_entity.longname())
for lexeme in entity.lexer(show_inactive=False):
# print(lexeme.text(), ': ', lexeme.token())
tokens_list.append(lexeme.text())
if lexeme.token() == 'Identifier':
identifiers_list.append(lexeme.text())
if lexeme.token() == 'Keyword':
keywords_list.append(lexeme.text())
if lexeme.token() == 'Operator':
operators_list.append(lexeme.text())
if lexeme.text() in return_and_print_kw_list:
return_and_print_count += 1
if lexeme.text() in condition_kw_list:
condition_count += 1
if lexeme.text() in uncondition_kw_list:
uncondition_count += 1
if lexeme.text() in exception_kw_list:
exception_count += 1
if lexeme.text() in new_count_kw_list:
new_count += 1
if lexeme.text() in super_count_kw_list:
super_count += 1
if lexeme.text() == '.':
dots_count += 1
except:
raise RuntimeError('Error in computing class lexical metrics for class "{0}"'.format(entity.longname()))
number_of_assignments = operators_list.count('=')
number_of_operators_without_assignments = len(operators_list) - number_of_assignments
number_of_unique_operators = len(set(list(filter('='.__ne__, operators_list))))
class_lexicon_metrics_dict.update({'NumberOfTokens': len(tokens_list)})
class_lexicon_metrics_dict.update({'NumberOfUniqueTokens': len(set(tokens_list))})
class_lexicon_metrics_dict.update({'NumberOfIdentifies': len(identifiers_list)})
class_lexicon_metrics_dict.update({'NumberOfUniqueIdentifiers': len(set(identifiers_list))})
class_lexicon_metrics_dict.update({'NumberOfKeywords': len(keywords_list)})
class_lexicon_metrics_dict.update({'NumberOfUniqueKeywords': len(set(keywords_list))})
class_lexicon_metrics_dict.update(
{'NumberOfOperatorsWithoutAssignments': number_of_operators_without_assignments})
class_lexicon_metrics_dict.update({'NumberOfAssignments': number_of_assignments})
class_lexicon_metrics_dict.update({'NumberOfUniqueOperators': number_of_unique_operators})
class_lexicon_metrics_dict.update({'NumberOfDots': dots_count})
class_lexicon_metrics_dict.update({'NumberOfSemicolons': entity.metric(['CountSemicolon'])['CountSemicolon']})
class_lexicon_metrics_dict.update({'NumberOfReturnAndPrintStatements': return_and_print_count})
class_lexicon_metrics_dict.update({'NumberOfConditionalJumpStatements': condition_count})
class_lexicon_metrics_dict.update({'NumberOfUnConditionalJumpStatements': uncondition_count})
class_lexicon_metrics_dict.update({'NumberOfExceptionStatements': exception_count})
class_lexicon_metrics_dict.update({'NumberOfNewStatements': new_count})
class_lexicon_metrics_dict.update({'NumberOfSuperStatements': super_count})
# print('Class lexicon metrics:', class_lexicon_metrics_dict)
return class_lexicon_metrics_dict
@classmethod
def compute_java_package_metrics(cls, db=None, class_name: str = None):
# print('ib', entity.ib())
# package_name = ''
# Find package: strategy 1
# for ib in entity.ib():
# if ib.find('Package:') != -1:
# sp = ib.split(':')
# print('entity ib', sp[1][1:-1])
# package_name = sp[1][1:-1]
# Find package: strategy 2: Dominated strategy
class_name_list = class_name.split('.')[:-1]
package_name = '.'.join(class_name_list)
# print('package_name string', package_name)
package_list = db.lookup(package_name + '$', 'Package')
if package_list is None:
return None
if len(package_list) == 0: # if len != 1 return None!
return None
package = package_list[0]
# print('kind:', package.kind())
print('Computing package metrics for class: "{0}" in package: "{1}"'.format(class_name, package.longname()))
# Print info
# print('package metrics')
package_metrics = package.metric(package.metrics())
# print('number of metrics:', len(metrics), metrics)
# for i, metric in enumerate(metrics.keys()):
# print(i + 1, ': ', metric, metrics[metric])
# print('class metrics')
# metrics2 = entity.metric(entity.metrics())
# print('number of metrics:', len(metrics), metrics2)
# for i, metric2 in enumerate(metrics.keys()):
# print(i + 1, ': ', metric2, metrics[metric2])
#
# print(package.refs('Definein'))
# for defin in package.refs('Definein'):
# print('kind', defin.ent().kind())
# print(defin, '-->', defin.ent().ents('Java Define', 'Class'))
# metrics = entity.metric(defin.ent().metrics())
# print('number of metrics in file:', len(metrics), metrics)
# for i, metric in enumerate(metrics.keys()):
# print(i + 1, ': ', metric, metrics[metric])
classes_and_interfaces_list = UnderstandUtility.get_package_clasess_java(package_entity=package)
# print(classes_and_interfaces_list)
# quit()
# 2. Custom package metrics
# 2.1. PKLOC (15)
pk_loc_list = list()
pk_loc_decl_list = list()
pk_loc_exe_list = list()
for type_entity in classes_and_interfaces_list:
pk_loc_list.append(type_entity.metric(['CountLineCode'])['CountLineCode'])
pk_loc_decl_list.append(type_entity.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
pk_loc_exe_list.append(type_entity.metric(['CountLineCodeExe'])['CountLineCodeExe'])
cls.remove_none_from_lists([pk_loc_list, pk_loc_decl_list, pk_loc_exe_list])
try:
package_metrics.update({'AvgLineCodeDecl': sum(pk_loc_decl_list) / len(pk_loc_decl_list)})
package_metrics.update({'AvgLineCodeExe': sum(pk_loc_exe_list) / len(pk_loc_exe_list)})
package_metrics.update({'MaxLineCode': max(pk_loc_list)})
package_metrics.update({'MaxLineCodeDecl': max(pk_loc_decl_list)})
package_metrics.update({'MaxLineCodeExe': max(pk_loc_exe_list)})
package_metrics.update({'MinLineCode': min(pk_loc_list)})
package_metrics.update({'MinLineCodeDecl': min(pk_loc_decl_list)})
package_metrics.update({'MinLineCodeExe': min(pk_loc_exe_list)})
package_metrics.update({'SDLineCode': np.std(pk_loc_list)})
package_metrics.update({'SDLineCodeDecl': np.std(pk_loc_decl_list)})
package_metrics.update({'SDLineCodeExe': np.std(pk_loc_exe_list)})
except:
raise TypeError('Error happened when computing package metrics for class "{0}" with list "{1}"'.format(class_name,
pk_loc_decl_list))
# 2.2 PKNOS (15)
pk_stmt_list = list()
pk_stmt_decl_list = list()
pk_stmt_exe_list = list()
for type_entity in classes_and_interfaces_list:
pk_stmt_list.append(type_entity.metric(['CountStmt'])['CountStmt'])
pk_stmt_decl_list.append(type_entity.metric(['CountStmtDecl'])['CountStmtDecl'])
pk_stmt_exe_list.append(type_entity.metric(['CountStmtExe'])['CountStmtExe'])
cls.remove_none_from_lists([pk_stmt_list, pk_stmt_decl_list, pk_stmt_exe_list])
package_metrics.update({'AvgStmt': sum(pk_stmt_decl_list) / len(pk_stmt_decl_list)})
package_metrics.update({'AvgStmtDecl': sum(pk_stmt_decl_list) / len(pk_stmt_decl_list)})
package_metrics.update({'AvgStmtExe': sum(pk_stmt_exe_list) / len(pk_stmt_exe_list)})
package_metrics.update({'MaxStmt': max(pk_stmt_list)})
package_metrics.update({'MaxStmtDecl': max(pk_stmt_decl_list)})
package_metrics.update({'MaxStmtExe': max(pk_stmt_exe_list)})
package_metrics.update({'MinStmt': min(pk_stmt_list)})
package_metrics.update({'MinStmtDecl': min(pk_stmt_decl_list)})
package_metrics.update({'MinStmtExe': min(pk_stmt_exe_list)})
package_metrics.update({'SDStmt': np.std(pk_stmt_list)})
package_metrics.update({'SDStmtDecl': np.std(pk_stmt_decl_list)})
package_metrics.update({'SDStmtExe': np.std(pk_stmt_exe_list)})
# 2.3 PKCC (20)
pk_cyclomatic_list = list()
pk_cyclomatic_namm_list = list()
pk_cyclomatic_strict_list = list()
pk_cyclomatic_strict_namm_list = list()
pk_cyclomatic_modified_list = list()
pk_cyclomatic_modified_namm_list = list()
pk_essential_list = list()
pk_essential_namm_list = list()
for type_entity in classes_and_interfaces_list:
pk_cyclomatic_list.append(type_entity.metric(['SumCyclomatic'])['SumCyclomatic'])
pk_cyclomatic_modified_list.append(type_entity.metric(['SumCyclomaticModified'])['SumCyclomaticModified'])
pk_cyclomatic_strict_list.append(type_entity.metric(['SumCyclomaticStrict'])['SumCyclomaticStrict'])
pk_essential_list.append(type_entity.metric(['SumEssential'])['SumEssential'])
cls.remove_none_from_lists(
[pk_cyclomatic_list, pk_cyclomatic_strict_list, pk_cyclomatic_modified_list, pk_essential_list])
package_metrics.update({'MinCyclomatic': min(pk_cyclomatic_list)})
package_metrics.update({'MinCyclomaticModified': min(pk_cyclomatic_modified_list)})
package_metrics.update({'MinCyclomaticStrict': min(pk_cyclomatic_strict_list)})
package_metrics.update({'MinEssential': min(pk_essential_list)})
package_metrics.update({'SDCyclomatic': np.std(pk_cyclomatic_list)})
package_metrics.update({'SDCyclomaticModified': np.std(pk_cyclomatic_modified_list)})
package_metrics.update({'SDCyclomaticStrict': np.std(pk_cyclomatic_strict_list)})
package_metrics.update({'SDEssential': np.std(pk_essential_list)})
# 2.4 PKNESTING (4)
pk_nesting_list = list()
for type_entity in classes_and_interfaces_list:
pk_nesting_list.append(type_entity.metric(['MaxNesting'])['MaxNesting'])
cls.remove_none_from_lists([pk_nesting_list])
package_metrics.update({'MinNesting': min(pk_nesting_list)})
package_metrics.update({'AvgNesting': sum(pk_nesting_list) / len(pk_nesting_list)})
package_metrics.update({'SDNesting': np.std(pk_nesting_list)})
# 2.5
# Other Size/Count metrics (understand built-in metrics)
# PKNOMNAMM: Package number of not accessor or mutator methods
j_code_odor = JCodeOdorMetric()
pk_not_accessor_and_mutator_methods_list = list()
pk_accessor_and_mutator_methods_list = list()
for type_entity in classes_and_interfaces_list:
pk_not_accessor_and_mutator_methods_list.append(j_code_odor.NOMNAMM(type_entity))
pk_accessor_and_mutator_methods_list.append(j_code_odor.NOMAMM(type_entity))
cls.remove_none_from_lists([pk_not_accessor_and_mutator_methods_list, pk_accessor_and_mutator_methods_list])
package_metrics.update({'PKNOMNAMM': sum(pk_not_accessor_and_mutator_methods_list)})
# 2.6 Visibility metrics
# Other Visibility metrics metrics (understand built-in metrics)
package_metrics.update({'PKNOAMM': sum(pk_accessor_and_mutator_methods_list)})
# To add other visibility metrics
# 2.7 Inheritance metrics
package_metrics.update({'PKNOI': len(UnderstandUtility.get_package_interfaces_java(package_entity=package))})
package_metrics.update(
{'PKNOAC': len(UnderstandUtility.get_package_abstract_class_java(package_entity=package))})
# print(len(package_metrics))
# print(package_metrics)
return package_metrics
@classmethod
def compute_java_project_metrics(cls, db):
project_metrics = db.metric(db.metrics())
# print('number of metrics:', len(project_metrics), project_metrics)
# for i, metric in enumerate( project_metrics.keys()):
# print(i + 1, ': ', metric, project_metrics[metric])
# print(project_metrics) # Print Understand built-in metrics
# 2 Custom project metrics
files = UnderstandUtility.get_project_files_java(db=db)
# 2.1 PJLOC (30)
pj_loc_list = list()
pj_loc_decl_list = list()
pj_loc_exe_list = list()
pj_stmt_list = list()
pj_stmt_decl_list = list()
pj_stmt_exe_list = list()
for file_entity in files:
pj_loc_list.append(file_entity.metric(['CountLineCode'])['CountLineCode'])
pj_loc_decl_list.append(file_entity.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
pj_loc_exe_list.append(file_entity.metric(['CountLineCodeExe'])['CountLineCodeExe'])
pj_stmt_list.append(file_entity.metric(['CountStmt'])['CountStmt'])
pj_stmt_decl_list.append(file_entity.metric(['CountStmtDecl'])['CountStmtDecl'])
pj_stmt_exe_list.append(file_entity.metric(['CountStmtExe'])['CountStmtExe'])
cls.remove_none_from_lists([pj_loc_list, pj_loc_decl_list, pj_loc_exe_list,
pj_stmt_list, pj_stmt_decl_list, pj_stmt_exe_list])
project_metrics.update({'AvgLineCodeDecl': sum(pj_loc_decl_list) / len(pj_loc_decl_list)})
project_metrics.update({'AvgLineCodeExe': sum(pj_loc_exe_list) / len(pj_loc_exe_list)})
project_metrics.update({'MaxLineCode': max(pj_loc_list)})
project_metrics.update({'MaxLineCodeDecl': max(pj_loc_decl_list)})
project_metrics.update({'MaxLineCodeExe': max(pj_loc_exe_list)})
project_metrics.update({'MinLineCode': min(pj_loc_list)})
project_metrics.update({'MinLineCodeDecl': min(pj_loc_decl_list)})
project_metrics.update({'MinLineCodeExe': min(pj_loc_exe_list)})
project_metrics.update({'SDLineCode': np.std(pj_loc_list)})
project_metrics.update({'SDLineCodeDecl': np.std(pj_loc_decl_list)})
project_metrics.update({'SDLineCodeExe': np.std(pj_loc_exe_list)})
# 2.2. PJNOST (15)
project_metrics.update({'AvgStmt': sum(pj_stmt_list) / len(pj_stmt_list)})
project_metrics.update({'AvgStmtDecl': sum(pj_stmt_decl_list) / len(pj_stmt_decl_list)})
project_metrics.update({'AvgStmtExe': sum(pj_stmt_exe_list) / len(pj_stmt_exe_list)})
project_metrics.update({'MaxStmt': max(pj_stmt_list)})
project_metrics.update({'MaxStmtDecl': max(pj_stmt_decl_list)})
project_metrics.update({'MaxStmtExe': max(pj_stmt_exe_list)})
project_metrics.update({'MinStmt': min(pj_stmt_list)})
project_metrics.update({'MinStmtDecl': min(pj_stmt_decl_list)})
project_metrics.update({'MinStmtExe': min(pj_stmt_exe_list)})
project_metrics.update({'SDStmt': np.std(pj_stmt_list)})
project_metrics.update({'SDStmtDecl': np.std(pj_stmt_decl_list)})
project_metrics.update({'SDStmtExe': np.std(pj_stmt_exe_list)})
# 2.3 Other Count/Size metrics
packages = db.ents('Java Package')
# print('number of packages', len(packages))
project_metrics.update({'NumberOfPackages': len(packages)})
j_code_odor = JCodeOdorMetric()
pj_number_of_method_namm = 0
for class_ in UnderstandUtility.get_project_classes_java(db=db):
pj_number_of_method_namm += j_code_odor.NOMNAMM(class_)
project_metrics.update({'PJNOMNAMM': pj_number_of_method_namm})
# 2.4 PJCC (20): Project cyclomatic complexity
pj_cyclomatic_list = list()
pj_cyclomatic_namm_list = list()
pj_cyclomatic_strict_list = list()
pj_cyclomatic_strict_namm_list = list()
pj_cyclomatic_modified_list = list()
pj_cyclomatic_modified_namm_list = list()
pj_essential_list = list()
pj_essential_namm_list = list()
for type_entity in files:
pj_cyclomatic_list.append(type_entity.metric(['SumCyclomatic'])['SumCyclomatic'])
pj_cyclomatic_modified_list.append(type_entity.metric(['SumCyclomaticModified'])['SumCyclomaticModified'])
pj_cyclomatic_strict_list.append(type_entity.metric(['SumCyclomaticStrict'])['SumCyclomaticStrict'])
pj_essential_list.append(type_entity.metric(['SumEssential'])['SumEssential'])
cls.remove_none_from_lists([pj_cyclomatic_list, pj_cyclomatic_strict_list,
pj_cyclomatic_modified_list, pj_essential_list])
project_metrics.update({'SumCyclomatic': sum(pj_cyclomatic_list)})
project_metrics.update({'SumCyclomaticModified': sum(pj_cyclomatic_modified_list)})
project_metrics.update({'SumCyclomaticStrict': sum(pj_cyclomatic_strict_list)})
project_metrics.update({'SumEssential': sum(pj_essential_list)})
project_metrics.update({'MaxCyclomatic': max(pj_cyclomatic_list)})
project_metrics.update({'MaxCyclomaticModified': max(pj_cyclomatic_modified_list)})
project_metrics.update({'MaxCyclomaticStrict': max(pj_cyclomatic_strict_list)})
project_metrics.update({'MaxEssential': max(pj_essential_list)})
project_metrics.update({'AvgCyclomatic': sum(pj_cyclomatic_list) / len(pj_cyclomatic_list)})
project_metrics.update(
{'AvgCyclomaticModified': sum(pj_cyclomatic_modified_list) / len(pj_cyclomatic_modified_list)})
project_metrics.update({'AvgCyclomaticStrict': sum(pj_cyclomatic_strict_list) / len(pj_cyclomatic_strict_list)})
project_metrics.update({'AvgEssential': sum(pj_essential_list) / len(pj_essential_list)})
project_metrics.update({'MinCyclomatic': min(pj_cyclomatic_list)})
project_metrics.update({'MinCyclomaticModified': min(pj_cyclomatic_modified_list)})
project_metrics.update({'MinCyclomaticStrict': min(pj_cyclomatic_strict_list)})
project_metrics.update({'MinEssential': min(pj_essential_list)})
project_metrics.update({'SDCyclomatic': np.std(pj_cyclomatic_list)})
project_metrics.update({'SDCyclomaticModified': np.std(pj_cyclomatic_modified_list)})
project_metrics.update({'SDCyclomaticStrict': np.std(pj_cyclomatic_strict_list)})
project_metrics.update({'SDEssential': np.std(pj_essential_list)})
# 2.4 PKNESTING (4)
pj_nesting_list = list()
for type_entity in files:
pj_nesting_list.append(type_entity.metric(['MaxNesting'])['MaxNesting'])
cls.remove_none_from_lists([pj_nesting_list])
project_metrics.update({'MinNesting': min(pj_nesting_list)})
project_metrics.update({'AvgNesting': sum(pj_nesting_list) / len(pj_nesting_list)})
project_metrics.update({'SDNesting': np.std(pj_nesting_list)})
# 3 Inheritance metrics
project_metrics.update({'PJNOI': len(UnderstandUtility.get_project_interfaces_java(db=db))})
project_metrics.update({'PJNAC': len(UnderstandUtility.get_project_abstract_classes_java(db=db))})
return project_metrics
@classmethod
def get_entity_kind(cls, db, class_name):
entity = db.lookup(class_name + '$', 'Type')
return entity[0].kindname()
@classmethod
def remove_none_from_lists(cls, lists: list = None):
for i, list_ in enumerate(lists):
if len(list_) == 0:
list_.append(0)
warnings.warn('Empty list passed!')
# else:
# list_ = [i for i in list_ if i is not None]
# if len(list_) == 0:
# list_.append(0)
# raise ValueError('Required data for systematic metric computation is not enough!')
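    # Illustrative sketch (hypothetical values; assumes this class is the TestabilityMetrics
    # class referenced elsewhere): empty metric lists are padded with a single 0 so the
    # sum()/min()/max()/np.std() aggregations above never see an empty sequence.
    # metric_lists = [[3, 1, 4], []]
    # TestabilityMetrics.remove_none_from_lists(metric_lists)
    # assert metric_lists[1] == [0]  # the empty list is padded in place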
# ------------------------------------------------------------------------
class PreProcess:
"""
"""
# -------------------------------------------
# Dataset creation API
@classmethod
def create_understand_database_from_project(cls, root_path=None):
# First path
# root_path = 'E:/LSSDS/EvoSuite/SF110-20130704-src/SF110-20130704-src/'
# Second path, after eliminating all test class form SF110
root_path = 'sf110_without_test/' # A place for both project sources and understand databases
# 'create -db C:\Users\NOLIMIT\Desktop\sbta -languages c# add C:\Users\NOLIMIT\Desktop\sbta analyze -all'
# {0}: understand_db_directory, {1}: understand_db_name, {2}: project_root_directory
cmd = 'und create -db {0}{1}.udb -languages java add {2} analyze -all'
# projects = [x[0] for x in os.walk(root_path)]
projects = [name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name))]
for project_ in projects:
command_ = cmd.format(root_path, project_, root_path + project_)
print('executing command {0}'.format(command_))
# returned_value_in_byte = subprocess.check_output(command_, shell=True)
os.system('cmd /c "{0}"'.format(command_))
# os.system('cmd / k "{0}"'.format(command_))
@classmethod
def extract_project_classes_all(cls, udbs_path, class_list_csv_path_root=r'class_list_csvs/'):
files = [f for f in os.listdir(udbs_path) if os.path.isfile(os.path.join(udbs_path, f))]
for f in files:
print('processing understand db file {0}:'.format(f))
db = understand.open(os.path.join(udbs_path, f))
cls.write_project_classes(project_name=f[:-4], db=db, csv_path=class_list_csv_path_root + f[:-4] + '.csv')
print('processing understand db file {0} was finished'.format(f))
db.close()
@classmethod
def extract_project_classes(cls, db):
classes_list = UnderstandUtility.get_project_classes_longnames_java(db=db)
print('-' * 75)
print('@understand', len(set(classes_list)), set(classes_list))
return classes_list
@classmethod
def write_project_classes(cls, project_name: str = None, db=None, csv_path: str = None):
classes = cls.extract_project_classes(db=db)
df = pd.DataFrame(columns=['Project', 'Class', 'Line', 'Branch', 'Mutation', 'Output', 'Exceptions', 'Tests'])
df['Project'] = [project_name for i in range(0, len(classes))]
df['Class'] = classes
df.to_csv(csv_path, index=False)
@classmethod
def read_project_classes(cls, project_name: str = None, db=None, df: pd.DataFrame = None):
df1 = df.loc[df.Project == project_name]
class_entities = list()
for index, row in df1.iterrows():
# Find relevant class entity
class_entity_ = UnderstandUtility.get_class_entity_by_name(db=db, class_name=row['Class'])
if class_entity_ is not None:
method_list = UnderstandUtility.get_method_of_class_java2(db=db, class_entity=class_entity_)
if method_list is not None:
class_entities.append(class_entity_)
else:
# We do not need a class without any method!
warnings.warn('Requested class with name "{0}" does not have any method!'.format(row['Class']))
else:
# if class not found it may be an enum, or interface so we simply ignore it for metric computation
warnings.warn('Requested class with name "{0}" was not found int the project!'.format(row['Class']))
return class_entities
@classmethod
def extract_metrics_and_coverage_all(cls, udbs_path: str = r'sf110_without_test',
class_list_csv_path: str = r'runtime_result/evosuit160_sf110_result_html_with_project.csv',
csvs_path: str = r'sf110_csvs_without_test_e3/',
):
df = pd.read_csv(class_list_csv_path, delimiter=',', index_col=False)
files = [f for f in os.listdir(udbs_path) if os.path.isfile(os.path.join(udbs_path, f))]
t = list()
p = list()
for i, f in enumerate(files):
print('processing understand db file {0}:'.format(f))
db = understand.open(os.path.join(udbs_path, f))
# cls.check_compute_metrics_by_class_list(project_name=f[:-4], database=db, class_list=df, csv_path=csvs_path)
# t.append(threading.Thread(target=cls.check_compute_metrics_by_class_list, args=(f[:-4], db, df, csvs_path, )))
# t[i].start()
# p.append(multiprocessing.Process(target=cls.check_compute_metrics_by_class_list, args=(f[:-4], db, df, csvs_path, )))
# p[i].start()
cls.compute_metrics_by_class_list(project_name=f[:-4], database=db, class_list=df, csv_path=csvs_path)
print('processing understand db file {0} was finished'.format(f))
db.close()
@classmethod
def check_compute_metrics_by_class_list(cls, project_name: str = None, database=None, class_list=None,
csv_path=None):
class_entities = cls.read_project_classes(project_name=project_name, db=database, df=class_list, )
print('Number of classes in {0}: {1}'.format(project_name, len(class_entities)))
columns = ['Project', 'NumberOfClass']
columns.extend(TestabilityMetrics.get_all_metrics_names())
dummy_data = [0 for i in range(0, len(columns) - 2)]
dummy_data.insert(0, project_name)
dummy_data.insert(1, len(class_entities))
df = pd.DataFrame(data=[dummy_data], columns=columns)
# print(df)
# print(columns)
df.to_csv(csv_path + project_name + '.csv', index=False, )
@classmethod
def compute_metrics_by_class_list(cls, project_name: str = None, database=None, class_list=None, csv_path=None):
all_class_metrics_value = list()
# print('Calculating project metrics')
# project_metrics_dict = TestabilityMetrics.compute_java_project_metrics(db=database)
# if project_metrics_dict is None:
# raise TypeError('No project metrics for project {} was found!'.format(project_name))
class_entities = cls.read_project_classes(project_name=project_name, db=database, df=class_list, )
for class_entity in class_entities:
one_class_metrics_value = [class_entity.longname()]
# print('Calculating package metrics')
package_metrics_dict = TestabilityMetrics.compute_java_package_metrics(db=database,
class_name=class_entity.longname())
if package_metrics_dict is None:
raise TypeError('No package metric for item {} was found'.format(class_entity.longname()))
# print('Calculating class lexicon metrics')
class_lexicon_metrics_dict = TestabilityMetrics.compute_java_class_metrics_lexicon(db=database,
entity=class_entity)
if class_lexicon_metrics_dict is None:
raise TypeError('No class lexicon metric for item {} was found'.format(class_entity.longname()))
# print('Calculating class ordinary metrics')
class_ordinary_metrics_dict = TestabilityMetrics.compute_java_class_metrics2(db=database,
entity=class_entity)
if class_ordinary_metrics_dict is None:
raise TypeError('No class ordinary metric for item {} was found'.format(class_entity.longname()))
# Write project_metrics_dict
# for metric_name in TestabilityMetrics.get_project_metrics_names():
# one_class_metrics_value.append(project_metrics_dict[metric_name])
# Write package_metrics_dict
for metric_name in TestabilityMetrics.get_package_metrics_names():
one_class_metrics_value.append(package_metrics_dict[metric_name])
# Write class_lexicon_metrics_dict
for metric_name in TestabilityMetrics.get_class_lexicon_metrics_names():
one_class_metrics_value.append(class_lexicon_metrics_dict[metric_name])
# Write class_ordinary_metrics_dict
for metric_name in TestabilityMetrics.get_class_ordinary_metrics_names():
one_class_metrics_value.append(class_ordinary_metrics_dict[metric_name])
all_class_metrics_value.append(one_class_metrics_value)
columns = ['Class']
columns.extend(TestabilityMetrics.get_all_metrics_names())
df = pd.DataFrame(data=all_class_metrics_value, columns=columns)
print('df for class {0} with shape {1}'.format(project_name, df.shape))
df.to_csv(csv_path + project_name + '.csv', index=False)
# -------------------------------------------
@classmethod
def intersection(cls, list1, list2):
return list(set(list1) & set(list2))
@classmethod
def extract_coverage_before_and_after_refactoring(cls, path_before: str = None, path_after: str = None):
df_before = pd.read_csv(path_before, delimiter=',', index_col=False, encoding='utf8', )
df_after = pd.read_csv(path_after, delimiter=',', index_col=False, encoding='utf8')
df = pd.DataFrame()
df['Class'] = df_before['TARGET_CLASS']
# quit()
df['CoverageBeforeRefactor'] = df_before['Coverage']
coverage_after_list = list()
for i, class_ in enumerate(df['Class']):
row = df_after.loc[df_after['TARGET_CLASS'] == str(class_)]
# print(row)
if row is None or row.empty:
coverage_after_list.append(None)
continue
coverage_after_list.append(row.iloc[0]['Coverage'])
print('{0}, class_: {1}, coverage_after: {2}'.format(i + 2, class_, row.iloc[0]['Coverage']))
df['CoverageAfterRefactor'] = coverage_after_list
df.to_csv('refactors/mango_statistics_both.csv', index=False)
# -------------------------------------------
# Create complete dataset API
@classmethod
def create_complete_dataset(cls, separated_csvs_root: str = r'sf110_csvs_without_test_e3/',
complete_csv_root: str = r'dataset06/',
complete_csv_file: str = r'DS060Raw.csv'):
"""
        This method merges all of the separate csv files (one per project)
        into a single csv file for use with machine learning classifiers.
:param complete_csv_file:
:param separated_csvs_root:
:param complete_csv_root:
:return:
"""
project_high_level_info = list()
columns = ['Class']
columns.extend(TestabilityMetrics.get_all_metrics_names())
df = pd.DataFrame(columns=columns)
for filename in os.listdir(separated_csvs_root):
try:
df2 = pd.read_csv(separated_csvs_root + filename, delimiter=',', index_col=False)
            except Exception:
                raise ValueError('Could not read csv file: ' + filename)
df2.columns = [column.replace(' ', '') for column in df2.columns]
df = df.append(df2, ignore_index=True)
project_name = filename.split('_')[1].capitalize()
print(filename)
project_high_level_info.append([project_name[:-4],
'-',
df2['Project_CountDeclFile'][0],
df2['Project_CountLineCode'][0],
])
df3 = pd.DataFrame(data=project_high_level_info, columns=['Project', 'Domain', 'Java files', 'Line of codes'])
print(df3.to_markdown(index=False))
quit()
df.to_csv(complete_csv_root + complete_csv_file, index=False)
# -------------------------------------------
# Data preprocessing and cleaning API
# Step 0: Filter irrelevant samples (rows)
@classmethod
def remove_irrelevant_samples(cls, csv_path, csv_new_path):
df = pd.read_csv(csv_path, delimiter=',', index_col=False)
print('df:', df.shape)
df1 = df.loc[
(df.Tests <= 0)
| (df.CSORD_NumberOfClassInItsFile >= 2)
| (df.CSORD_CountLineCode < 5)
| (df.Label_Combine1 <= 0)
]
print('df1 Removed:', df1.shape)
df1 = pd.concat([df, df1]).drop_duplicates(keep=False)
print('df1:', df1.shape)
df1.to_csv(csv_new_path, index=False)
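    # The pd.concat([...]).drop_duplicates(keep=False) pattern used above (and in the
    # methods below) is a set-difference idiom: rows that appear in both frames are
    # dropped, leaving df minus the selected rows (it assumes df has no fully duplicated
    # rows of its own). A minimal standalone sketch with hypothetical data:
    # a = pd.DataFrame({'x': [1, 2, 3]})
    # b = a.loc[a.x >= 3]                                    # rows to remove
    # kept = pd.concat([a, b]).drop_duplicates(keep=False)   # rows with x in {1, 2}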
@classmethod
def remove_dataclasses(cls, csv_path, csv_new_path):
df = pd.read_csv(csv_path, delimiter=',', index_col=False)
print('df:', df.shape)
df['NumberOfMethod'] = df['CSORD_CountDeclInstanceMethod'] + df['CSORD_CountDeclClassMethod']
df1 = df.loc[
(df['NumberOfMethod'] <= 0)
]
print('df1 Removed:', df1.shape)
# df1.to_csv('SF110_data_classes.csv', index=False)
# quit()
df1 = pd.concat([df, df1]).drop_duplicates(keep=False)
df1['NumberOfMethodNAMM'] = df1['CSORD_CSNOMNAMM'] - df1['CSORD_NumberOfClassConstructors']
df2 = df1.loc[
(df1['NumberOfMethodNAMM'] <= 0)
# & (df1['CSORD_NumberOfClassConstructors'] > 0)
& (
(df1['CSORD_CountDeclInstanceVariable'] >= 1)
| (df1['CSORD_CountDeclClassVariable'] >= 1)
)
& (df1['CSORD_MaxCyclomatic'] <= 1)
]
print('df2 Removed:', df2.shape)
df2 = pd.concat([df1, df2]).drop_duplicates(keep=False)
df2['CSORD_CSNOMNAMM'] = df2['NumberOfMethodNAMM']
df2['CSORD_NumberOfClassInItsFile'] = df2['NumberOfMethod']
df2.rename(columns={'CSORD_NumberOfClassInItsFile': 'CSORD_NumberOfMethods'}, inplace=True)
df2.drop(columns=['NumberOfMethodNAMM', 'NumberOfMethod'], inplace=True)
print('df2:', df2.shape)
df2.to_csv(csv_new_path, index=False)
@classmethod
def remove_high_coverage_classes_samples(cls, csv_path, csv_new_path):
df = pd.read_csv(csv_path, delimiter=',', index_col=False)
print('df:', df.shape)
df1 = df.loc[(df.TestabilityNominal == 'Coverageable')
]
print('df1 Removed:', df1.shape)
df1 = pd.concat([df, df1]).drop_duplicates(keep=False)
print('df1:', df1.shape)
df1.to_csv(csv_new_path, index=False)
# Step 1:
# Step 1.1 Remove zero columns
@classmethod
def remove_zero_column(cls, path: str = None, path_new: str = None):
pd.set_option('display.max_rows', None, 'display.max_columns', None)
pd.options.display.max_colwidth = 1000
df = pd.read_csv(path, delimiter=',', index_col=False)
print(type(df))
df.columns = [column.replace(' ', '_') for column in df.columns]
df = df.loc[:, (df != 0).any(axis=0)]
print(df.shape)
# print(list(df.columns))
columns_with_min_in_their_names = [i for i in df.columns if 'Min' in i]
print('columns_with_min_in_their_names len:', len(columns_with_min_in_their_names))
df2 = df.drop(columns=columns_with_min_in_their_names)
# Print and save new dataset as csv and html
print(df2.shape)
df2.to_csv(path_new, index=False)
# Step 1.2
@classmethod
def remove_zero_variance_column(cls, path: str = None, path_new: str = None):
df1 = pd.read_csv(path, delimiter=',', index_col=False)
df = df1.iloc[:, 1:-5]
all_cols = df.columns
# 1. Drop low_variety_cols
# df2 = df.loc[:, df.var() == 0.0]
low_variety_cols = []
for col in df.columns:
if len(df[col].unique()) < 10:
df.drop(col, inplace=True, axis=1)
low_variety_cols.append(col)
print('low variety cols: {0}: {1}'.format(len(low_variety_cols), low_variety_cols))
# 2. Drop low_variance_cols
low_variance_cols = []
for col in df.columns:
# print(df[col].var())
if df[col].var() < 0.1:
df.drop(col, inplace=True, axis=1)
low_variance_cols.append(col)
print('low_variance_cols: {0}: {1}'.format(len(low_variance_cols), low_variance_cols))
# 3. Drop high_variance_cols
""""
high_variance_cols = []
for col in df.columns:
# print(df[col].var())
if df[col].var() >= 100e9:
df.drop(col, inplace=True, axis=1)
high_variance_cols.append(col)
print('high_variance_cols : {0}: {1}'.format(len(high_variance_cols), high_variance_cols))
# quit()
"""
# 4. Drop many_zero_cols
many_zero_cols = []
for col in df.columns:
# print(df[col].var())
# print((df[col] == 0).sum(), len(df.index))
if (df[col] == 0).sum() >= round(len(df.index) * 4 / 5.):
df.drop(col, inplace=True, axis=1)
many_zero_cols.append(col)
print('many_zero_cols: {0}: {1}'.format(len(many_zero_cols), many_zero_cols))
print(df.shape)
df.insert(loc=0, column='Class', value=df1['Class'])
df['Label_LineCoverage'] = df1['Label_LineCoverage']
df['Label_BranchCoverage'] = df1['Label_BranchCoverage']
df['Label_MutationScore'] = df1['Label_MutationScore']
df['Label_Combine1'] = df1['Label_Combine1']
df['Label_Combine2'] = df1['Label_Combine2']
print('Before dropping many zero rows:', df.shape)
# 5. Drop many_zero_rows
print('-' * 25)
many_zero_rows = []
for index, item in ((df == 0).sum(1)).iteritems():
if item >= round((len(df.columns) - 6) * 1 / 2):
# print(index, item)
many_zero_rows.append([index, item])
df.drop(index=index, axis=0, inplace=True)
        print('many_zero_rows {0}: {1}'.format(len(many_zero_rows), many_zero_rows[:1]))
print('After dropping many zero rows:', df.shape)
# 6. Statistics
print('Total number of zeros: {0}'.format((df == 0).sum(1).sum()))
print('Total number of non zeros: {0}'.format((df != 0).sum(1).sum()))
print('Total number of items: {0}'.format(len(df.columns) * len(df.index)))
print('Portion of zeros: {0}'.format(((df == 0).sum(1).sum()) / (len(df.columns) * len(df.index))))
# non_constant_cols = df.columns
# constant_col = (set(all_cols)).difference(set(non_constant_cols))
# print(len(constant_col))
# print(constant_col)
df.to_csv(path_new, index=False)
# Step 2: Discretization (Convert numerical branch coverage to nominal coverageability labels)
# Step 2.1:
@classmethod
def discretize(cls, path: str = None, path_new: str = None):
"""
https://pbpython.com/pandas-qcut-cut.html
quantile_based_discretization
:param path:
:param path_new:
:return:
"""
data_frame = pd.read_csv(path,
delimiter=',',
index_col=False,
# usecols=[0,2]
)
# data_frame.columns = [column.replace(' ', '_') for column in data_frame.columns]
# print(data_frame)
# quit()
# Define fine-grain coverageability nominal labels (five category)
# coverageability_labels = ['VeryLow', 'Low', 'Mean', 'High', 'VeryHigh']
coverageability_labels = ['Low', 'Moderate', 'High', ]
# bins = 5
# bins = pd.IntervalIndex.from_tuples([(-0.001, 0.30), (0.30, 0.70), (0.70, 1.001)])
bins = [-0.001, 30.0, 70.0, 100.001]
bins = [-0.001, 25.0, 75.0, 100.001]
# Add coverageability column
# data_frame['CoverageabilityNominal'] = pd.cut(data_frame.loc[:, ['Label_BranchCoverage']].T.squeeze(),
# bins=bins,
# labels=coverageability_labels,
# right=True
# )
# print(pd.cut(data.loc[:, ['_branch_coverage']].T.squeeze(),
# bins=5,
# labels=['VeryLow', 'Low', 'Mean', 'High', 'VeryHigh']
# ).value_counts())
"""
data_frame['CoverageabilityNominalCombined'] = pd.cut(data_frame.loc[:, ['Label_Combine1']].T.squeeze(),
bins=bins,
labels=coverageability_labels,
right=True
)
"""
# testability_labels = ['NonCoverageable', 'Coverageable']
testability_labels = ['Low', 'High']
bins = [-0.001, 50.0, 100.001]
data_frame['LineCategorical'] = pd.cut(data_frame.loc[:, ['Label_LineCoverage']].T.squeeze(),
bins=bins,
labels=testability_labels,
right=True
)
testability_labels_binary = [0, 1]
data_frame['BranchCategorical'] = pd.cut(data_frame.loc[:, ['Label_BranchCoverage']].T.squeeze(),
bins=2,
labels=testability_labels,
)
print(data_frame)
# Remove extra columns
columns_list = ['Label_LineCoverage', 'Label_BranchCoverage', 'Label_MutationScore', 'Label_Combine1',
'Label_Combine2']
columns_list = ['Label_Combine1']
# data_frame_dropped = data_frame.drop(columns_list, axis=1)
# Print and save new dataset as csv and html
# print(data_frame_dropped)
# path_new = r'es_complete_dataset_all_1_0_6_without_test_93col_discretize_91col_15417.csv'
# print(data_frame_dropped.shape)
# data_frame_dropped.to_csv(path_new, index=False)
data_frame.to_csv(path_new, index=False)
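    # A small pd.cut sketch mirroring the fixed-bin discretization above (hypothetical
    # values; the bins and labels are the ones defined in discretize):
    # s = pd.Series([5.0, 40.0, 90.0])
    # pd.cut(s, bins=[-0.001, 25.0, 75.0, 100.001], labels=['Low', 'Moderate', 'High'])
    # # -> ['Low', 'Moderate', 'High']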
@classmethod
def label_with_line_and_branch(cls, path: str = None, path_new: str = None):
df = pd.read_csv(path, delimiter=',', index_col=False, )
merged_label = list()
for index, row in df.iterrows():
if row['LineCategorical'] == 'Low' and row['BranchCategorical'] == 'Low':
merged_label.append('LowLow')
elif row['LineCategorical'] == 'Low' and row['BranchCategorical'] == 'High':
merged_label.append('LowHigh')
elif row['LineCategorical'] == 'High' and row['BranchCategorical'] == 'High':
merged_label.append('HighHigh')
else:
merged_label.append('HighLow')
df['LabelMerged'] = merged_label
df.to_csv(path_new, index=False)
# Step 2.2
# Discretize variable into equal-sized buckets based
@classmethod
def discretize_q(cls, path: str = None, path_new: str = None):
"""
quantile_based_discretization
:param path:
:param path_new:
:return:
"""
data_frame = pd.read_csv(path, delimiter=',', index_col=False, )
# data_frame['Label_BranchCoverage'].replace(to_replace=0, value=np.nan, inplace=True)
# Define fine-grain coverageability nominal labels (five category)
coverageability_labels = ['VeryLow', 'Low', 'Mean', 'High', 'VeryHigh']
coverageability_labels = ['Low', 'Moderate', 'High', ]
# Add coverageability column
data_frame['CoverageabilityNominalCombined'] = pd.qcut(data_frame.Label_Combine1,
q=3,
# labels=coverageability_labels,
# duplicates='drop'
)
# print(pd.cut(data.loc[:, ['_branch_coverage']].T.squeeze(),
# bins=5,
# labels=['VeryLow', 'Low', 'Mean', 'High', 'VeryHigh']
# ).value_counts())
print(data_frame)
# quit()
testability_labels = ['NonTestable', 'Testable']
data_frame['TestabilityNominal'] = pd.qcut(data_frame.Label_Combine1,
q=2,
# labels=testability_labels
)
"""
testability_labels_binary = [0, 1]
data_frame['TestabilityBinary'] = pd.qcut(data_frame.Label_BranchCoverage,
q=2,
labels=testability_labels_binary
)
"""
print(data_frame.shape)
# Remove extra columns
columns_list = ['Label_LineCoverage', 'Label_BranchCoverage', 'Label_MutationScore', 'Label_Combine1',
'Label_Combine2']
columns_list = ['Label_Combine1', ]
data_frame_dropped = data_frame.drop(columns_list, axis=1)
# Print and save new dataset as csv and html
print(data_frame_dropped)
# path_new = r'es_complete_dataset_all_1_0_6_without_test_93col_discretize_91col_15417.csv'
# print(data_frame_dropped.shape)
data_frame_dropped.to_csv(path_new, index=False)
# data_frame.to_csv(path_new, index=False)
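    # Unlike pd.cut above, pd.qcut splits on sample quantiles, so each bucket receives
    # roughly the same number of rows regardless of the value range. A minimal sketch
    # with hypothetical data:
    # s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
    # pd.qcut(s, q=3, labels=['Low', 'Moderate', 'High'])  # ~3 samples per label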
# Step 3: Remove data classes
@classmethod
def mitigate_imbalanced(cls, path: str = None, path_new: str = None):
"""
:param path: The path of complete dataset (raw data with 3 tagged column)
:param path_new: The path of new dataset
:return:
"""
# pd.set_option('display.max_rows', None, 'display.max_columns', None)
# pd.options.display.max_colwidth = 1000
df = pd.read_csv(path, delimiter=',',
index_col=False,
# usecols=[0,2],
)
# df.columns = [column.replace(' ', '_') for column in df.columns]
print('df0:', df.shape)
# df = pd.DataFrame(data, columns=['class', "_Project_CountDeclClass"])
# df = pd.DataFrame(data=data,)
# print(df)
# for i, j in df.iterrows():
# print(i, j)
# print()
# print(df.isna())
# print(df.query("_branch_coverage==1.0"))
# print(data.columns)
# print(data.loc[1:50, "_branch_coverage")
# data.filter()
# data1 = data["_branch_coverage"] == 1.
# data1 = data1[0:50, "_branch_coverage"]
df1 = df.loc[((df['Label_Combine1'] < 50.0) & (df.CSORD_SumCyclomatic <= 1))]
# data1 = data1.filter(like='UnifyCase', axis=0)
# data1 = data1[data1.ClassOrdinary_MaxNesting == data1.ClassOrdinary_MaxNesting.max()]
# data1 = data1[data1.ClassOrdinary_AvgCyclomatic == data1.ClassOrdinary_AvgCyclomatic.max()]
# data1 = data1[data1.ClassOrdinary_SumCyclomatic == data1.ClassOrdinary_SumCyclomatic.max()]
# data1 = data1[data1.ClassOrdinary_CountLineCode == data1.ClassOrdinary_CountLineCode.max()]
df1 = pd.concat([df, df1]).drop_duplicates(keep=False)
print('df1:', df1.shape)
# Put data classes into df2
df2 = df1.loc[(df1['Label_Combine1'] >= 100.0)
# & (data['ClassOrdinary_CountLineCode'] == 10e6)
# & (data['class'].str.contains('weka'))
# & (data.ClassOrdinary_MaxNesting == data.ClassOrdinary_MaxNesting.max())
& (df1.CSORD_SumCyclomatic <= 1)
# & (df1.CSORD_MaxCyclomatic <= 1)
# & (data['ClassOrdinary_CountDeclMethodAll'] <= 5)
# & (data.ClassOrdinary_CountDeclMethod <= 0)
# & (data.ClassOrdinary_MaxNesting == 0)
]
# Remove data classes from df1 and put result in df2
df2 = pd.concat([df2, df1]).drop_duplicates(keep=False)
print('df2:', df2.shape)
# Put data classes into df3
df3 = df2.loc[(df2['Label_Combine1'] >= 100.0)
& (df2.CSORD_CountLineCodeExe <= 1)
]
# Remove data classes from df1 and put result in df3
df3 = pd.concat([df3, df2]).drop_duplicates(keep=False)
print('df3:', df3.shape)
# Put data classes into df4
df4 = df3.loc[(df3['Label_Combine1'] >= 100.0)
& (df3.CSLEX_NumberOfConditionalJumpStatements <= 1)
]
# Remove data classes from df1 and put result in df2
df4 = pd.concat([df4, df3]).drop_duplicates(keep=False)
print('df4:', df4.shape)
print('number of removed samples (data classes):', len(df.index) - len(df4.index))
"""
# Put data classes into df5
df5 = df4.loc[(df4['Label_BranchCoverage'] >= 100.0)
& (df4.CSORD_CountLineCodeExe <= 1)
]
df5 = pd.concat([df5, df4]).drop_duplicates(keep=False)
print('df5:', df5.shape)
# Put data classes into df6
df6 = df5.loc[(df5['Label_BranchCoverage'] >= 100.0)
& (df5.CSORD_CountDeclInstanceMethod <= 1)
]
df6 = pd.concat([df6, df5]).drop_duplicates(keep=False)
print('df6:', df6.shape)
# !Mitigate Zero coverages
df7 = df6.loc[(df6['Label_BranchCoverage'] < 5.0) &
(df6.CSORD_CountLineCodeExe <= 5)
]
df7 = pd.concat([df7, df6]).drop_duplicates(keep=False)
print('df7:', df7.shape)
# !
df8 = df7.loc[(df7['Label_BranchCoverage'] >= 100.0) &
(df7.CSORD_CountStmtExeNAMM <= 1)
]
df8 = pd.concat([df8, df7]).drop_duplicates(keep=False)
print('df8:', df8.shape)
# --
df9 = df8.loc[(df8['Label_BranchCoverage'] >= 100.0) &
(df8.CSORD_CSNOMNAMM <= 0)
]
df9 = pd.concat([df9, df8]).drop_duplicates(keep=False)
print('df9:', df9.shape)
"""
# Temporary code for experiment
# df3 = df2.loc[
# (df['Label_BranchCoverage'] < 1.)
# & (df.ClassOrdinary_CountDeclMethodAll >= 0)
# ]
# print(df3.shape)
# df3 = df2.loc[:, ['Class',
# 'ClassOrdinary_CountLineCode',
# 'ClassOrdinary_AvgCyclomatic',
# 'ClassOrdinary_SumCyclomatic',
# 'ClassOrdinary_MaxNesting',
# 'Label_BranchCoverage']]
# print('df3:', df3.shape)
# col = data['ClassOrdinary_CountLineCode']
# print('max col is in row {0} with value {1} and name {3}'.format(col.idxmax(), col.max(), col[col.idxmax(), :]))
# print(data.max())
# print(data[data.ClassOrdinary_CountLineCode ==
# data.ClassOrdinary_CountLineCode.max()][['Class', 'Label_LineCoverage']])
# Print and save new dataset as csv and html
# df2.to_csv(path_new,
# header=True,
# index=True,
# index_label='index')
df4.to_csv(path_new, index=False)
# Step 4: Remove outlier records based on z_scores of all features (base data preparing has finished)
# Step 4.1: Remove outliers with z-score
@classmethod
def remove_outliers(cls, path: str = None, path_new: str = None):
# pd.set_option('display.max_rows', None, 'display.max_columns', None)
pd.options.display.max_colwidth = 1000
df = pd.read_csv(path,
delimiter=',',
index_col=False,
# usecols=[0,2]
)
df.columns = [column.replace(' ', '_') for column in df.columns]
df2 = df.iloc[:, 1:-10]
"""
non_features_columns = ['Class',
'Label_BranchCoverage',
'CoverageabilityNominal',
'TestabilityNominal', 'TestabilityBinary']
df2 = df.drop(columns=non_features_columns)
# New for version 0.3.0 (ignore in version 4)
# We include only primary metrics set in outlier removing process
# Remove systematically generated metrics from data frame
p_names = set(TestabilityMetrics.get_all_primary_metrics_names())
s_names = set(TestabilityMetrics.get_all_metrics_names())
print('p_names', len(p_names))
print('s_names', len(s_names))
systematically_generated_metric_list = s_names.difference(p_names)
print(systematically_generated_metric_list)
print('len systematically_generated_metric_list',
len(systematically_generated_metric_list))
systematically_generated_metric_list = [i for i in systematically_generated_metric_list if 'Min' not in i]
print('len systematically_generated_metric_list', len(systematically_generated_metric_list))
df2 = df2.drop(columns=list(systematically_generated_metric_list))
print(df2.columns)
print(df2.shape)
# quit()
"""
z_scores = stats.zscore(df2)
# print(z_scores)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
df.drop(columns=['Label_MutationScore', 'Output', 'Exceptions', 'Label_Combine2', 'Label_MaxLineAndBranch',
'Label_MinLineAndBranch'], inplace=True)
df3 = df[filtered_entries] # we can use df or df2
print('df3:', df3.shape)
# Print and save new dataset as csv and html
# path_new = r'es_complete_dataset_all_1_0_6_without_test_93col_discretize_91col_15417_outlier_removed.csv'
df3.to_csv(path_new, index=False)
@classmethod
def remove_outliers2(cls, path: str = None, path_new: str = None):
df = pd.read_csv(path, delimiter=',', index_col=False, )
df.drop(columns=['Label_MutationScore', 'Output', 'Exceptions', 'Label_Combine2', 'Label_MaxLineAndBranch',
'Label_MinLineAndBranch'], inplace=True)
df.to_csv(path_new, index=False)
# Step 4.2: Remove outliers with z-score
@classmethod
def remove_outliers_with_lof(cls, path: str = None, path_new: str = None):
# https://machinelearningmastery.com/model-based-outlier-detection-and-removal-in-python/
# https://scikit-learn.org/stable/auto_examples/ensemble/plot_isolation_forest.html
# pd.set_option('display.max_rows', None, 'display.max_columns', None)
pd.options.display.max_colwidth = 1000
df = pd.read_csv(path, delimiter=',', index_col=False, )
df.columns = [column.replace(' ', '_') for column in df.columns]
X1 = df.iloc[:, 1:-10]
data = df.values
X = data[:, 1:-10]
y0 = data[:, 0]
y1 = data[:, -10:]
od = LocalOutlierFactor(n_neighbors=15, leaf_size=30)
# od = IsolationForest(n_estimators=225, bootstrap=True)
yhat = od.fit_predict(X)
# select all rows that are not outliers
mask = yhat != -1
X, y0, y1 = X[mask, :], y0[mask], y1[mask]
# summarize the shape of the updated training dataset
# print(X.shape, y0.shape, y1.shape)
df2 = pd.DataFrame(X, columns=X1.columns)
df2.insert(loc=0, column='Class', value=y0)
df2['Label_LineCoverage'] = y1[:, 0]
df2['Label_BranchCoverage'] = y1[:, 1]
# df2['Label_MutationScore'] = y1[:, 2]
# df2['Output'] = y1[:, 3]
# df2['Exceptions'] = y1[:, 4]
df2['Tests'] = y1[:, 5]
df2['Label_Combine1'] = y1[:, 6]
# df2['Label_Combine2'] = y1[:, 7]
# df2['Label_MaxLineAndBranch'] = y1[:, 8]
# df2['Label_MinLineAndBranch'] = y1[:, 9]
print(df2)
print('number of outliers', len(df.index) - len(df2.index))
df2.to_csv(path_new, index=False)
df.drop(columns=['Label_MutationScore', 'Output', 'Exceptions', 'Label_Combine2', 'Label_MaxLineAndBranch',
'Label_MinLineAndBranch'], inplace=True)
df3 = pd.concat([df, df2]).drop_duplicates(keep=False)
df3.to_csv(path_new[:-4] + '_outliers_only.csv', index=False, )
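    # LocalOutlierFactor.fit_predict labels outliers with -1 and inliers with 1; the
    # mask above keeps inliers only. A minimal standalone sketch (hypothetical data):
    # X = np.array([[1.0], [1.1], [0.9], [10.0]])
    # yhat = LocalOutlierFactor(n_neighbors=2).fit_predict(X)  # e.g. [ 1,  1,  1, -1]
    # X_clean = X[yhat != -1]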
# Step 5: Training set/ testing set split and save
@classmethod
def split_dataset_base(cls, path: str = None, path_new: str = None):
df = pd.read_csv(path, delimiter=',', index_col=False)
# X = df.iloc[:, 1:-4]
# y = df.iloc[:, -3]
X = df.iloc[:, 1:-1]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.25,
random_state=42,
stratify=y
)
# print(X_train.head(), y_train.head())
df_train = pd.DataFrame(X_train)
df_train['CoverageabilityNominalCombined'] = y_train
print(df_train)
        df_test = pd.DataFrame(X_test)
import unittest
import pandas as pd
import numpy as np
from pyrolite.util.math import *
from pyrolite.util.synthetic import random_cov_matrix
from sympy import tensorcontraction
class TestAugmentedCovarianceMatrix(unittest.TestCase):
def setUp(self):
self.mean = np.random.randn(5)
self.cov = random_cov_matrix(5)
def test_augmented_covariance_matrix(self):
ACM = augmented_covariance_matrix(self.mean, self.cov)
class TestInterpolateLine(unittest.TestCase):
def setUp(self):
self.x, self.y = np.linspace(0.0, 10.0, 10), np.random.randn(10)
def test_default(self):
        # should do no interpolation
ix, iy = interpolate_line(self.x, self.y)
self.assertTrue(isinstance(ix, np.ndarray))
self.assertTrue(ix.shape == self.x.shape)
def test_n(self):
for n in [2, 5]:
ix, iy = interpolate_line(self.x, self.y, n=n)
self.assertTrue(isinstance(ix, np.ndarray))
self.assertTrue(
iy.shape[-1] == self.y.shape[-1] + (self.y.shape[-1] - 1) * n
)
class TestIndexesRanges(unittest.TestCase):
def setUp(self):
self.x = np.linspace(1, 10, 10)
def test_linspc(self):
spc = linspc_(self.x.min(), self.x.max())
self.assertTrue(np.isclose(spc[0], self.x.min()))
self.assertTrue(np.isclose(spc[-1], self.x.max()))
def test_logspc(self):
spc = logspc_(self.x.min(), self.x.max())
self.assertTrue(np.isclose(spc[0], self.x.min()))
self.assertTrue(np.isclose(spc[-1], self.x.max()))
def test_linrng_default(self):
# should be equivalent to linspace where all above zero
rng = linrng_(self.x)
self.assertTrue(isinstance(rng, tuple))
self.assertTrue(np.isclose(rng[0], self.x.min()))
self.assertTrue(np.isclose(rng[1], self.x.max()))
def test_logrng_default(self):
rng = logrng_(self.x)
self.assertTrue(isinstance(rng, tuple))
self.assertTrue(np.isclose(rng[0], self.x.min()))
self.assertTrue(np.isclose(rng[1], self.x.max()))
class TestGridFromRanges(unittest.TestCase):
def setUp(self):
self.x = np.random.randn(10, 2)
def test_default(self):
out = grid_from_ranges(self.x)
# default bins = 100
self.assertTrue(out[0].size == 100 ** 2)
def test_bins(self):
for bins in [2, 10, 50]:
out = grid_from_ranges(self.x, bins=bins)
class TestIsClose(unittest.TestCase):
def test_non_nan(self):
self.assertTrue(isclose(1.0, 1.0))
self.assertTrue(isclose(0.0, 0.0))
self.assertTrue(isclose(np.array([1.0]), np.array([1.0])))
self.assertTrue(isclose(np.array([0.0]), np.array([0.0])))
def test_nan(self):
self.assertTrue(isclose(np.nan, np.nan))
self.assertTrue(isclose(np.array([np.nan]), np.array([np.nan])))
class TestIsNumeric(unittest.TestCase):
"""
Tests is_numeric function.
"""
def test_numeric_collection_instances(self):
for obj in [
np.array([]),
pd.Series([], dtype="float32"),
pd.DataFrame([], dtype="float32"),
]:
with self.subTest(obj=obj):
self.assertTrue(is_numeric(obj))
def test_numeric_collection_classes(self):
for obj in [np.ndarray, pd.Series, pd.DataFrame]:
with self.subTest(obj=obj):
self.assertTrue(is_numeric(obj))
def test_number_instances(self):
for obj in [0, 1, 1.0, 10.0, np.nan, np.inf]:
with self.subTest(obj=obj):
self.assertTrue(is_numeric(obj))
def test_number_classes(self):
for obj in [np.float, np.int, np.bool, float, int, bool]:
with self.subTest(obj=obj):
self.assertTrue(is_numeric(obj))
def test_bool(self):
for obj in [True, False]:
with self.subTest(obj=obj):
self.assertTrue(is_numeric(obj))
def test_non_numeric_collection_instances(self):
for obj in [list(), dict(), set()]:
with self.subTest(obj=obj):
self.assertFalse(is_numeric(obj))
def test_non_numeric_collection_classes(self):
for obj in [list, dict, set]:
with self.subTest(obj=obj):
self.assertFalse(is_numeric(obj))
class TestRoundSig(unittest.TestCase):
"""
Tests round_sig function.
round_sig(x, sig=2)
"""
def setUp(self):
self.values = [0, 1, 1.1, 2.111, 1232.01, 100000.00, 10000.0001, np.nan, np.inf]
self.expect2 = np.array([0, 1, 1.1, 2.1, 1200, 100000, 10000, np.nan, np.inf])
def test_sigs(self):
vals = np.array(self.values)
for sig in range(5):
with self.subTest(sig=sig):
rounded = round_sig(vals, sig=sig)
self.assertTrue((significant_figures(rounded) <= sig).all())
def test_list(self):
vals = list(self.values)
rounded = round_sig(vals, sig=2)
self.assertTrue(
np.isclose(
rounded, self.expect2.reshape(rounded.shape), equal_nan=True
).all()
)
def test_array(self):
vals = np.array(self.values)
rounded = round_sig(vals, sig=2)
self.assertTrue(
np.isclose(
rounded, self.expect2.reshape(rounded.shape), equal_nan=True
).all()
)
    def test_series(self):
vals = pd.Series(self.values, dtype="float64")
rounded = round_sig(vals, sig=2)
self.assertTrue(
np.isclose(
rounded, self.expect2.reshape(rounded.shape), equal_nan=True
).all()
)
def test_dataframe(self):
vals = pd.DataFrame(self.values, dtype="float64")
rounded = round_sig(vals, sig=2)
self.assertTrue(
np.isclose(
rounded, self.expect2.reshape(rounded.shape), equal_nan=True
).all()
)
class TestSignificantFigures(unittest.TestCase):
"""
Tests significant_figures function.
significant_figures(n, unc=None, max_sf=20)
"""
def setUp(self):
self.values = [0, 1, 1.1, 2.111, 1232.01, 100000.00, 10000.0001, np.nan, np.inf]
self.unc = [0, 0.1, 0.5, 1.0, 10, 1, 100, np.nan, np.inf]
self.expect = np.array([0, 1, 2, 4, 6, 1, 9, 0, 0])
self.unc_expect = np.array([0, 2, 2, 1, 3, 6, 3, 0, 0])
def test_unc(self):
for vals, unc, expect in [
(self.values, self.unc, self.unc_expect),
(self.values[3], self.unc[3], self.unc_expect[3]),
]:
with self.subTest(vals=vals, unc=unc):
sfs = significant_figures(vals, unc=unc)
self.assertTrue(np.allclose(sfs, expect, equal_nan=True))
def test_max_sf(self):
for max_sf in range(1, 10):
with self.subTest(max_sf=max_sf):
vals = list(self.values)
sfigs = significant_figures(vals, max_sf=max_sf)
close = np.isclose(
sfigs, self.expect.reshape(sfigs.shape), equal_nan=True
)
# where the number of sig figures is below the max
# should be as expected
self.assertTrue(close[self.expect < max_sf].all())
def test_numbers(self):
for ix, value in enumerate(self.values):
with self.subTest(value=value, ix=ix):
sf = significant_figures(value)
expect = self.expect[ix]
self.assertTrue(np.isclose(sf, expect, equal_nan=True))
def test_list(self):
vals = list(self.values)
sfigs = significant_figures(vals)
self.assertTrue(
np.isclose(sfigs, self.expect.reshape(sfigs.shape), equal_nan=True).all()
)
def test_array(self):
vals = np.array(self.values)
sfigs = significant_figures(vals)
self.assertTrue(
np.isclose(sfigs, self.expect.reshape(sfigs.shape), equal_nan=True).all()
)
    def test_series(self):
vals = pd.Series(self.values)
sfigs = significant_figures(vals)
self.assertTrue(
np.isclose(sfigs, self.expect.reshape(sfigs.shape), equal_nan=True).all()
)
def test_dataframe(self):
vals = pd.DataFrame(self.values)
sfigs = significant_figures(vals)
self.assertTrue(
np.isclose(sfigs, self.expect.reshape(sfigs.shape), equal_nan=True).all()
)
class TestMostPrecise(unittest.TestCase):
"""
Tests most_precise function.
most_precise(array_like)
"""
def setUp(self):
self.values = [0, 1, 1.1, 2.111, 1232.01, 100000.00, 10000.0001, np.nan, np.inf]
self.expect = 10000.0001
def test_list(self):
vals = list(self.values)
mp = most_precise(vals)
self.assertEqual(mp, self.expect)
def test_array(self):
vals = np.array(self.values)
mp = most_precise(vals)
self.assertEqual(mp, self.expect)
    def test_series(self):
        vals = pd.Series(self.values)
        mp = most_precise(vals)
        self.assertEqual(mp, self.expect)
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class TestRemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
        assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ed Ed', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
        assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed']
class TestNormalizeExpeditionSectionCols:
def test_dataframe_does_not_change_if_expection_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Sample_exist(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Label_exist(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expection_section_using_Label(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expection_section_using_Sample(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_missing_aw_col(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3", "10-U2H-20T-3"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_no_data(self):
data = {
"Col": [0],
"Sample": ["No data this hole"],
}
df = pd.DataFrame(data)
data = {
"Col": [0],
"Sample": ["No data this hole"],
"Exp": [None],
"Site": [None],
"Hole": [None],
"Core": [None],
"Type": [None],
"Section": [None],
"A/W": [None],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_otherwise_raise_error(self):
df = pd.DataFrame({"foo": [1]})
message = "File does not have the expected columns."
with pytest.raises(ValueError, match=message):
normalize_expedition_section_cols(df)
class TestRemoveBracketText:
def test_removes_text_within_brackets_at_end_of_cell(self):
df = pd.DataFrame(['aa [A]', 'bb [BB]', 'cc [C] ', 'dd [dd] '])
expected = pd.DataFrame(['aa', 'bb', 'cc', 'dd'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_at_start_of_cell(self):
df = pd.DataFrame(['[A] aa', '[BB] bb', '[C] cc ', ' [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_in_middle_of_cell(self):
df = pd.DataFrame(['aa [A] aa', 'bb [BB] bb', ' cc [C] cc ', ' dd [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_removes_letters_numbers_punctuation_within_brackets(self):
df = pd.DataFrame(['aa [A A]', 'bb [BB 123]', 'cc [123-456.] '])
expected = pd.DataFrame(['aa', 'bb', 'cc'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
class TestRemoveWhitespaceFromDataframe:
def test_remove_leading_and_trailing_spaces_from_dataframe(self):
data = {
'A': ['A', 'B ', ' C', 'D ', ' Ed ', ' 1 '],
'B': ['Aa', 'Bb ', ' Cc', 'Dd ', ' Ed Ed ', ' 11 '],
}
df = pd.DataFrame(data)
data2 = {
'A': ['A', 'B', 'C', 'D', 'Ed', '1'],
'B': ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed', '11'],
}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_ignores_numeric_columns(self):
data = {
'A': ['A', 'B ', ' C'],
'B': [1, 2, 3],
'C': [1.1, 2.2, 3.3],
}
df = pd.DataFrame(data)
data2 = {
'A': ['A', 'B', 'C'],
'B': [1, 2, 3],
'C': [1.1, 2.2, 3.3],
}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_handles_empty_strings(self):
data = {'A': ['A', 'B ', ' C', ' ']}
df = pd.DataFrame(data)
data2 = {'A': ['A', 'B', 'C', '']}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_converts_nan_to_empty_strings(self):
data = {'A': ['A', 'B ', ' C', np.nan]}
df = pd.DataFrame(data)
data2 = {'A': ['A', 'B', 'C', '']}
        expected = pd.DataFrame(data2)
        remove_whitespace(df)
        assert_frame_equal(df, expected)
import numpy as np
import pandas as pd
from src.db import Player as PlayerDB
from src.db import Match as MatchDB
from src.db import Round as RoundDB
from src.db import PlayerStats as PlayerStatsDB
class Entity(object):
def __init__(self, db):
self._db = db
# set query
self.query = self._db.session.query(PlayerStatsDB).join(
PlayerStatsDB.round).join(RoundDB.match)
self.query = self.query.add_columns(MatchDB.season,
MatchDB.match_in_season,
RoundDB.round_in_match,
RoundDB.duration, MatchDB.date,
MatchDB.league)
@staticmethod
def __prettify_stat_df__(df):
df = df[[
'player_id', 'date', 'league', 'season', 'match_in_season',
'round_in_match', 'duration', 'kills', 'deaths', 'assists',
'exp_contrib', 'healing', 'damage_soaked', 'winner_team'
]]
new_column_names = {
'season': 'season',
'match_in_season': 'match',
'round_in_match': 'round'
}
return df.rename(columns=new_column_names)
@staticmethod
def __get_individual_scores__(data_series):
scores_dict = {
'kills':
3 * data_series.kills,
'deaths':
-1 * data_series.deaths,
'assists':
1.5 * data_series.assists,
'exp_per_min':
0.0075 * data_series.exp_contrib / data_series.duration,
'healing':
0.0001 * data_series.healing,
'damage_soaked':
0.0001 * data_series.damage_soaked,
'winner':
2 * data_series.winner_team,
'under_10_mins':
5 * data_series.winner_team * (data_series.duration < 10),
'under_15_mins':
2 * data_series.winner_team * (10 <= data_series.duration < 15),
}
return scores_dict
def __get_score_dict__(self, data_series):
score_dict = self.__get_individual_scores__(data_series)
score_dict['total'] = np.sum(list(score_dict.values()))
score_dict['player_id'] = data_series['player_id']
return score_dict
def get_stats(self, filter_query):
query = self.query.filter(*filter_query)
df = pd.read_sql(query.statement, query.session.bind)
return self.__prettify_stat_df__(df)
def get_scores(self, df):
scores = []
for i in range(len(df)):
score_dict = self.__get_score_dict__(df.iloc[i])
scores.append(score_dict)
        return pd.DataFrame(scores)
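# A hypothetical end-to-end sketch (assumes a db wrapper with a SQLAlchemy session, as
# expected by Entity.__init__):
# entity = Entity(db)
# stats_df = entity.get_stats([MatchDB.season == 1])
# scores_df = entity.get_scores(stats_df)
# print(scores_df.sort_values('total', ascending=False).head())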
import datetime
import pandas as pd
import numpy as np
import re
import os
def remove_blanks(df, col_name):
ctr = 0
working_df = pd.DataFrame(df)
# remove any blanks from the run
try:
while True:
value = working_df.at[ctr, col_name].lower()
if re.search("^blank\d*.*$", value) or re.search("^0$", value):
working_df.drop(labels=ctr, inplace=True)
ctr += 1
except ValueError:
pass
except KeyError:
pass
working_df = working_df.reset_index(drop=True)
print(" Done!\n")
return working_df
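# A minimal usage sketch (hypothetical frame): rows whose value matches "blank..." or "0"
# are dropped, everything else is kept with a reset index.
# runs = pd.DataFrame({'Sample': ['S1', 'Blank1', '0', 'S2']})
# runs = remove_blanks(runs, 'Sample')   # -> rows 'S1' and 'S2' remain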
def remove_pools(df, col_name):
working_df = pd.DataFrame(df)
col_lst = list(working_df.columns)
size = working_df.index
new_row = []
for i in range(len(size)):
cell_val = str(working_df.iloc[i][col_lst.index(col_name)]).split("/")
cell_val = cell_val[0]
currentrow = working_df.iloc[i]
currentrow = currentrow.values.tolist()
if not ("pool" in cell_val.lower().strip() or "panel" in cell_val.lower().strip()):
new_row.append(currentrow)
    working_df = pd.DataFrame(new_row, columns=col_lst)
    return working_df
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 14:56:33 2021
@author: parth
"""
import cv2
import numpy as np
import glob
import matplotlib.pyplot as plt
import json
import pandas as pd
import os
import sys
def read_images(imgpath):
imgpath = glob.glob(imgpath)
imgs = []
for ipath in imgpath:
img = cv2.imread(ipath)
imgs.append(img)
return imgs
def detect_faces(img):
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
proc_img = img.copy()
proc_imgs=[]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.2, 5,minSize=(30,30))
    for (x, y, w, h) in faces:
        cv2.rectangle(proc_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # crop each detected face from the original image rather than from the
        # progressively cropped copy, which breaks when more than one face is found
        proc_imgs.append(img[y:y + h, x:x + w])
return proc_imgs,faces
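# A short usage sketch (hypothetical image path; requires the Haar cascade xml next to
# the script, as loaded in detect_faces):
# img = cv2.imread('group_photo.jpg')
# face_crops, boxes = detect_faces(img)
# print('detected {0} face(s)'.format(len(boxes)))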
def json_create(lstfaces,filenames,opfilename):
json_list=[]
for i in range(len(lstfaces)):
#imgname = 'img_'+str((i+1))+'.jpg'
imgname = filenames[i]
if (len(lstfaces[i])<=0):
            # no faces were detected in this image, so nothing is recorded for it
            continue
else:
for ele in lstfaces[i]:
jsonobj = {}
jsonobj["iname"] = imgname
x,y,w,h=ele.astype('float32')
jsonobj["bbox"] = [x,y,w,h]
json_list.append(jsonobj)
#the result json file name
output_json = opfilename
#dump json_list to result.json
    df = pd.DataFrame(json_list)
import pandas as pd
import talib
def get_factors(index,opening,closing,highest,lowest,volume,rolling=30,normalization=True, drop=False):
    tmp = pd.DataFrame()
from datetime import timedelta
import numpy as np
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
option_context,
)
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_empty_frame_dtypes(self):
empty_df = DataFrame()
tm.assert_series_equal(empty_df.dtypes, Series(dtype=object))
nocols_df = DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object))
norows_df = DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc")))
norows_int_df = DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc"))
)
df = DataFrame({"a": 1, "b": True, "c": 1.0}, index=[1, 2, 3])
ex_dtypes = Series({"a": np.int64, "b": np.bool_, "c": np.float64})
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
tm.assert_series_equal(
df.dtypes,
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series({"c": np.float_}))
tm.assert_series_equal(
df.dtypes,
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_dtypes_timedeltas(self):
df = DataFrame(
{
"A": Series(date_range("2012-1-1", periods=3, freq="D")),
"B": Series([timedelta(days=i) for i in range(3)]),
}
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_frame_apply_np_array_return_type(self):
# GH 35517
df = DataFrame([["foo"]])
result = df.apply(lambda col: np.array("bar"))
expected = | Series(["bar"]) | pandas.Series |
"""
Import as:
import core.statistics as cstati
"""
import collections
import datetime
import functools
import logging
import math
import numbers
from typing import Any, Iterable, List, Optional, Tuple, Union, cast
import numpy as np
import pandas as pd
import scipy as sp
import sklearn.model_selection
import statsmodels
import statsmodels.api as sm
import core.finance as cfinan
import core.signal_processing as csigna
import helpers.dataframe as hdataf
import helpers.dbg as dbg
_LOG = logging.getLogger(__name__)
# #############################################################################
# Sampling statistics: start, end, frequency, NaNs, infs, etc.
# #############################################################################
def summarize_time_index_info(
srs: pd.Series,
nan_mode: Optional[str] = None,
prefix: Optional[str] = None,
) -> pd.Series:
"""
Return summarized information about datetime index of the input.
:param srs: pandas series of floats
:param nan_mode: argument for hdataf.apply_nan_mode()
:param prefix: optional prefix for output's index
:return: series with information about input's index
"""
dbg.dassert_isinstance(srs, pd.Series)
nan_mode = nan_mode or "drop"
prefix = prefix or ""
original_index = srs.index
# Assert that input series has a sorted datetime index.
dbg.dassert_isinstance(original_index, pd.DatetimeIndex)
dbg.dassert_strictly_increasing_index(original_index)
freq = original_index.freq
clear_srs = hdataf.apply_nan_mode(srs, mode=nan_mode)
clear_index = clear_srs.index
result = {}
if clear_srs.empty:
_LOG.warning("Empty input series `%s`", srs.name)
result["start_time"] = np.nan
result["end_time"] = np.nan
else:
result["start_time"] = clear_index[0]
result["end_time"] = clear_index[-1]
result["n_sampling_points"] = len(clear_index)
result["frequency"] = freq
if freq is None:
sampling_points_per_year = clear_srs.resample("Y").count().mean()
else:
sampling_points_per_year = hdataf.compute_points_per_year_for_given_freq(
freq
)
result["sampling_points_per_year"] = sampling_points_per_year
# Compute input time span as a number of `freq` units in
# `clear_index`.
if not clear_srs.empty:
if freq is None:
clear_index_time_span = (clear_index[-1] - clear_index[0]).days
sampling_points_per_year = (
hdataf.compute_points_per_year_for_given_freq("D")
)
else:
clear_index_time_span = len(srs[clear_index[0] : clear_index[-1]])
else:
clear_index_time_span = 0
result["time_span_in_years"] = (
clear_index_time_span / sampling_points_per_year
)
result = pd.Series(result, dtype="object")
result.index = prefix + result.index
return result
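# A minimal usage sketch (hypothetical series with a daily DatetimeIndex):
# idx = pd.date_range("2020-01-01", periods=10, freq="D")
# srs = pd.Series(np.arange(10.0), index=idx, name="example")
# print(summarize_time_index_info(srs))
# # -> start_time, end_time, n_sampling_points, frequency, sampling_points_per_year, ...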
def compute_special_value_stats(
srs: pd.Series,
prefix: Optional[str] = None,
) -> pd.Series:
"""
Calculate special value statistics in time series.
:param srs: pandas series of floats
:param prefix: optional prefix for metrics' outcome
:return: series of statistics
"""
prefix = prefix or ""
dbg.dassert_isinstance(srs, pd.Series)
result_index = [
prefix + "n_rows",
prefix + "frac_zero",
prefix + "frac_nan",
prefix + "frac_inf",
prefix + "frac_constant",
prefix + "num_finite_samples",
prefix + "num_unique_values",
]
nan_result = pd.Series(np.nan, index=result_index, name=srs.name)
if srs.empty:
_LOG.warning("Empty input series `%s`", srs.name)
return nan_result
result_values = [
len(srs),
compute_frac_zero(srs),
compute_frac_nan(srs),
compute_frac_inf(srs),
compute_zero_diff_proportion(srs).iloc[1],
count_num_finite_samples(srs),
count_num_unique_values(srs),
]
    result = pd.Series(data=result_values, index=result_index, name=srs.name)
    return result
import yahoo_fin.stock_info as si
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import time
from dateutil.relativedelta import relativedelta
import talib
import requests
import json
import openpyxl
import psycopg2 as pg
from Sentiment import *
### Work to add ###
# (1) Consider adding holidays to date_catch functinality
# (2) Get count of requested stocks that are in need of an update, instead of print "x is up to date"
# (3) Add Sentiment
# (4) Add ML resistance
# (5) Add ML trend
def __init__(conn, cur, finn_token, tickers, capital, risk):
talib = talib
conn = conn
cur = cur
finn_token = finn_token
capital = capital
risk = risk
tickers = tickers
def do_analysis(conn, cur, finn_token, tickers, capital, risk):
df_analyzed = pd.DataFrame(columns=['Ticker', 'Open', 'Quote', 'RSI', 'Trend', 'Above200', 'Earnings', 'Supp/Res', 'S/R Price', 'Pullback'])
# get earnings to avoid over clocking the API
df_earnings = get_earnings(finn_token)
for ticker in tickers:
update_data(ticker, conn, cur, finn_token)
print(f"Analyzing {ticker[0]}")
# Get historical data
data = get_hist(ticker, conn)
# Add indicator data
indicated_data = get_indicators(data)
# Analyze stonks:
df_analyzed = analyze_chart(ticker, indicated_data, df_analyzed, df_earnings, finn_token)
# Add Sentiment
#df_analyzed['ticker'] = df_analyzed['ticker'].apply(lambda x: )
df_analyzed = analyze_position(df_analyzed, capital, risk)
    df_analyzed = df_analyzed[df_analyzed['Above200'] == True]
    df_analyzed = df_analyzed[df_analyzed['RSI'].notna()]
    df_analyzed = df_analyzed[df_analyzed['Trend'].notna()]
    #df_analyzed = df_analyzed[df_analyzed['Earnings'].notna()]
    df_analyzed = df_analyzed[df_analyzed['Supp/Res'].notna()]
    df_analyzed = df_analyzed[df_analyzed['Pullback'].notna()]
# Add Sentiment
sentiment_scores = get_sentiment_score(df_analyzed['Ticker'], conn, cur)
sentiment_scores.rename(columns={"ticker": "Ticker", "compound": "FinViz Sentiment"}, inplace=True)
    # merge on the shared 'Ticker' column; DataFrame.join with on= would try to match
    # against the other frame's integer index and fail on the overlapping column name
    df_analyzed = df_analyzed.merge(sentiment_scores, on='Ticker', how='left')
return df_analyzed
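# A hypothetical driver sketch (database connection, cursor and Finnhub token assumed;
# tickers follow the (symbol, ...) tuple shape used above):
# results = do_analysis(conn, cur, finn_token, tickers=[('AAPL',)], capital=10000, risk=0.01)
# print(results.sort_values('FinViz Sentiment', ascending=False))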
def update_data(ticker, conn, cur, finn_token):
    df_insert = pd.DataFrame()
import pandas as pd
dfs = []
for i in range(24):
try:
df1 = pd.read_csv('result{}.csv'.format(i), sep = ';', encoding = 'utf-8', usecols= ['name', 'position', 'profession', 'profexp', 'anotherexp', 'strongs', 'contacts'])
dfs.append(df1)
except Exception:
print(i)
continue
df = pd.concat(dfs)
from __future__ import absolute_import, division, print_function
import re
import traceback
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from ..core import indexing
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.pycompat import PY3
from ..core.variable import Variable
from .variables import (
SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to,
safe_setitem, unpack_for_decoding, unpack_for_encoding)
try:
from pandas.errors import OutOfBoundsDatetime
except ImportError:
# pandas < 0.20
from pandas.tslib import OutOfBoundsDatetime
# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
_NS_PER_TIME_DELTA = {'us': int(1e3),
'ms': int(1e6),
's': int(1e9),
'm': int(1e9) * 60,
'h': int(1e9) * 60 * 60,
'D': int(1e9) * 60 * 60 * 24}
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _import_netcdftime():
'''
helper function handle the transition to netcdftime as a stand-alone
package
'''
try:
# Try importing netcdftime directly
import netcdftime as nctime
if not hasattr(nctime, 'num2date'):
# must have gotten an old version from netcdf4-python
raise ImportError
except ImportError:
# in netCDF4 the num2date/date2num function are top-level api
try:
import netCDF4 as nctime
except ImportError:
raise ImportError("Failed to import netcdftime")
return nctime
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
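def _demo_unpack_netcdf_time_units():
    # Illustrative sketch, not part of the original module (helper name is a
    # placeholder): shows how a CF-style unit string splits into its unit and
    # reference-date parts.
    delta_units, ref_date = _unpack_netcdf_time_units('days since 1970-01-01 00:00:00')
    assert delta_units == 'days'
    assert ref_date == '1970-01-01 00:00:00'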
def _decode_datetime_with_netcdftime(num_dates, units, calendar):
nctime = _import_netcdftime()
dates = np.asarray(nctime.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'netcdftime.datetime objects instead, reason: dates out'
' of range', SerializationWarning, stacklevel=3)
else:
try:
dates = nctime_to_nptime(dates)
except ValueError as e:
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy netcdftime.datetime objects instead, reason:'
'{0}'.format(e), SerializationWarning, stacklevel=3)
return dates
def _decode_cf_datetime_dtype(data, units, calendar):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(
indexing.as_indexable(data))
example_value = np.concatenate([first_n_items(values, 1) or [0],
last_item(values) or [0]])
try:
result = decode_cf_datetime(example_value, units, calendar)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
dtype = getattr(result, 'dtype', np.dtype('object'))
return dtype
def decode_cf_datetime(num_dates, units, calendar=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than netcdftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
netcdftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using netcdftime
raise OutOfBoundsDatetime
# fixes: https://github.com/pydata/pandas/issues/14068
        # these lines check if the lowest or the highest value in dates
# cause an OutOfBoundsDatetime (Overflow) error
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
# make _NS_PER_TIME_DELTA an array to ensure type upcasting
flat_num_dates_ns_int = (flat_num_dates.astype(np.float64) *
_NS_PER_TIME_DELTA[delta]).astype(np.int64)
dates = (pd.to_timedelta(flat_num_dates_ns_int, 'ns') +
ref_date).values
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_netcdftime(
flat_num_dates.astype(np.float), units, calendar)
return dates.reshape(num_dates.shape)
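def _demo_decode_cf_datetime():
    # Illustrative sketch, not part of the original module: with a standard
    # calendar, the offsets [0, 1] decode via the vectorized pandas path into
    # consecutive numpy datetime64 days.
    decoded = decode_cf_datetime(np.array([0, 1]), 'days since 2000-01-01')
    assert decoded.dtype == 'datetime64[ns]'
    assert decoded[1] == np.datetime64('2000-01-02')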
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape)
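def _demo_decode_cf_timedelta():
    # Illustrative sketch, not part of the original module: hour counts decode to
    # a timedelta64[ns] array, so 24 hours equals one day.
    td = decode_cf_timedelta(np.array([1, 24]), 'hours')
    assert td.dtype == 'timedelta64[ns]'
    assert td[1] == np.timedelta64(1, 'D')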
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit in ['days', 'hours', 'minutes', 'seconds']:
delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]
unit_delta = np.timedelta64(delta_ns, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = pd.to_datetime(np.asarray(dates).ravel(), box=False)
dates = dates[pd.notnull(dates)]
unique_timedeltas = np.unique(np.diff(dates))
units = _infer_time_units_from_diff(unique_timedeltas)
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
return '%s since %s' % (units, pd.Timestamp(reference_date))
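def _demo_infer_datetime_units():
    # Illustrative sketch, not part of the original module: daily spacing is
    # reported relative to the first date in the array.
    units = infer_datetime_units(pd.date_range('2000-01-01', periods=3, freq='D'))
    assert units == 'days since 2000-01-01 00:00:00'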
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
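def _demo_infer_timedelta_units():
    # Illustrative sketch, not part of the original module: one- and two-hour
    # deltas are evenly divisible by hours but not by days.
    assert infer_timedelta_units(pd.to_timedelta(['1h', '2h'])) == 'hours'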
def nctime_to_nptime(times):
"""Given an array of netcdftime.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype='M8[ns]')
for i, t in np.ndenumerate(times):
dt = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = '%s since %s' % (delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_netcdftime(dates, units, calendar):
"""Fallback method for encoding dates using netcdftime.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
nctime = _import_netcdftime()
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype('M8[us]').astype(datetime)
def encode_datetime(d):
return np.nan if d is None else nctime.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
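def _demo_cast_to_int_if_safe():
    # Illustrative sketch, not part of the original module: whole-number floats
    # are downcast to int64, fractional values are left untouched.
    assert cast_to_int_if_safe(np.array([1.0, 2.0])).dtype == np.int64
    assert cast_to_int_if_safe(np.array([1.5])).dtype == np.float64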
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
netcdftime.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = 'proleptic_gregorian'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
# parse with netcdftime instead
raise OutOfBoundsDatetime
assert dates.dtype == 'datetime64[ns]'
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
ref_date = np.datetime64(pd.Timestamp(ref_date))
num = (dates - ref_date) / time_delta
except (OutOfBoundsDatetime, OverflowError):
num = _encode_datetime_with_netcdftime(dates, units, calendar)
num = cast_to_int_if_safe(num)
return (num, units, calendar)
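def _demo_encode_cf_datetime():
    # Illustrative sketch, not part of the original module: two consecutive days
    # encode as integer offsets against the inferred units.
    dates = pd.to_datetime(['2000-01-01', '2000-01-02']).values
    num, units, calendar = encode_cf_datetime(dates)
    assert list(num) == [0, 1]
    assert units == 'days since 2000-01-01 00:00:00'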
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
    num = np.where(pd.isnull(timedeltas), np.nan, num)
from autodesk.states import INACTIVE, ACTIVE, DOWN
from pandas import Timedelta
import numpy as np
import pandas as pd
def enumerate_hours(start, end):
time = start
while time < end:
yield (time.weekday(), time.hour)
time = time + Timedelta(hours=1)
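def _demo_enumerate_hours():
    # Illustrative sketch, not part of the original module: yields one
    # (weekday, hour) pair per whole hour in the half-open interval.
    hours = list(enumerate_hours(pd.Timestamp('2018-01-01 00:00'),
                                 pd.Timestamp('2018-01-01 03:00')))
    assert hours == [(0, 0), (0, 1), (0, 2)]  # 2018-01-01 was a Monday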
def collect(default_state, initial, final, events):
assert initial < final
start = initial
state = default_state
for event in events.itertuples():
assert start <= event.date
if state == event.state:
# aggregate consecutive events with same state
continue
if start != event.date:
# do not emit zero-length spans
yield (start, event.date, state)
start = event.date
state = event.state
yield (start, final, state)
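def _demo_collect():
    # Illustrative sketch, not part of the original module: a single ACTIVE event
    # splits the interval into a default-state span followed by an ACTIVE span.
    events = pd.DataFrame({'date': [pd.Timestamp('2018-01-01 09:00')],
                           'state': [ACTIVE]})
    spans = list(collect(INACTIVE,
                         pd.Timestamp('2018-01-01 08:00'),
                         pd.Timestamp('2018-01-01 10:00'),
                         events))
    assert spans == [
        (pd.Timestamp('2018-01-01 08:00'), pd.Timestamp('2018-01-01 09:00'), INACTIVE),
        (pd.Timestamp('2018-01-01 09:00'), pd.Timestamp('2018-01-01 10:00'), ACTIVE),
    ]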
def cut(start, end, spans):
for span in spans.itertuples():
if span.end >= start and span.start <= end:
yield (
start if span.start < start else span.start,
end if span.end > end else span.end,
span.state
)
class Model:
def __init__(self, datastore):
self.datastore = datastore
def close(self):
self.datastore.close()
def set_desk(self, date, state):
self.datastore.set_desk(date, state)
def set_session(self, date, state):
self.datastore.set_session(date, state)
def get_desk_spans(self, initial, final):
spans = collect(
default_state=DOWN,
initial=initial,
final=final,
events=self.datastore.get_desk_events())
return pd.DataFrame.from_records(
spans, columns=['start', 'end', 'state'])
def get_session_spans(self, initial, final):
spans = collect(
default_state=INACTIVE,
initial=initial,
final=final,
events=self.datastore.get_session_events())
return pd.DataFrame.from_records(
spans, columns=['start', 'end', 'state'])
def get_session_state(self):
events = self.datastore.get_session_events()
return events.iloc[-1].state if not events.empty else INACTIVE
def get_desk_state(self):
events = self.datastore.get_desk_events()
return events.iloc[-1].state if not events.empty else DOWN
def get_active_time(self, initial, final):
session_spans = self.get_session_spans(initial, final)
if session_spans.iloc[-1].state == INACTIVE:
# TODO: Should return active time for current desk span
            return Timedelta(0)
# -*- coding: utf-8 -*-
import sys, os
import datetime, time
from math import ceil, floor # ceil : 소수점 이하를 올림, floor : 소수점 이하를 버림
import math
import pickle
import uuid
import base64
import subprocess
from subprocess import Popen
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar)
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
import numpy as np
from numpy import NaN, Inf, arange, isscalar, asarray, array
import pandas as pd
import pandas.io.sql as pdsql
from pandas import DataFrame, Series
# Google SpreadSheet Read/Write
import gspread # (추가 설치 모듈)
from oauth2client.service_account import ServiceAccountCredentials # (추가 설치 모듈)
from df2gspread import df2gspread as d2g # (추가 설치 모듈)
from string import ascii_uppercase # 알파벳 리스트
from bs4 import BeautifulSoup
import requests
import logging
import logging.handlers
import sqlite3
import telepot # 텔레그램봇(추가 설치 모듈)
from slacker import Slacker # 슬랙봇(추가 설치 모듈)
import csv
import FinanceDataReader as fdr
# Google Spreadsheet Setting *******************************
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
json_file_name = './secret/xtrader-276902-f5a8b77e2735.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_name, scope)
gc = gspread.authorize(credentials)
# XTrader-Stocklist URL
# spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0' # Test Sheet
spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1XE4sk0vDw4fE88bYMDZuJbnP4AF9CmRYHKY6fCXABw4/edit#gid=0' # Sheeet
testsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0'
# spreadsheet 연결 및 worksheet setting
doc = gc.open_by_url(spreadsheet_url)
doc_test = gc.open_by_url(testsheet_url)
shortterm_buy_sheet = doc.worksheet('매수모니터링')
shortterm_sell_sheet = doc.worksheet('매도모니터링')
shortterm_strategy_sheet = doc.worksheet('ST bot')
shortterm_history_sheet = doc.worksheet('매매이력')
condition_history_sheet = doc_test.worksheet('조건식이력')
price_monitoring_sheet = doc_test.worksheet('주가모니터링')
shortterm_history_cols = ['번호', '종목명', '매수가', '매수수량', '매수일', '매수전략', '매수조건', '매도가', '매도수량',
'매도일', '매도전략', '매도구간', '수익률(계산)','수익률', '수익금', '세금+수수료', '확정 수익금']
shortterm_analysis_cols = ['번호', '종목명', '우선순위', '일봉1', '일봉2', '일봉3', '일봉4', '주봉1', '월봉1', '거래량', '기관수급', '외인수급', '개인']
condition_history_cols = ['종목명', '매수가', '매수일','매도가', '매도일', '수익률(계산)', '수익률', '수익금', '세금+수수료']
# 구글 스프레드시트 업데이트를 위한 알파벳리스트(열 이름 얻기위함)
alpha_list = list(ascii_uppercase)
# SQLITE DB Setting *****************************************
DATABASE = 'stockdata.db'
def sqliteconn():
conn = sqlite3.connect(DATABASE)
return conn
# DB에서 종목명으로 종목코드, 종목영, 시장구분 반환
def get_code(종목명체크):
# 종목명이 띄워쓰기, 대소문자 구분이 잘못될 것을 감안해서
# DB 저장 시 종목명체크 컬럼은 띄워쓰기 삭제 및 소문자로 저장됨
# 구글에서 받은 종목명을 띄워쓰기 삭제 및 소문자로 바꿔서 종목명체크와 일치하는 데이터 저장
# 종목명은 DB에 있는 정상 종목명으로 사용하도록 리턴
종목명체크 = 종목명체크.lower().replace(' ', '')
query = """
select 종목코드, 종목명, 시장구분
from 종목코드
where (종목명체크 = '%s')
""" % (종목명체크)
conn = sqliteconn()
df = pd.read_sql(query, con=conn)
conn.close()
return list(df[['종목코드', '종목명', '시장구분']].values)[0]
# 종목코드가 int형일 경우 정상적으로 반환
def fix_stockcode(data):
if len(data)< 6:
for i in range(6 - len(data)):
data = '0'+data
return data
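def _demo_fix_stockcode():
    # Illustrative sketch, not part of the original file: short codes are
    # left-padded with zeros to the six-digit KRX format.
    assert fix_stockcode('5930') == '005930'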
# 구글 스프레드 시트 Import후 DataFrame 반환
def import_googlesheet():
try:
# 1. 매수 모니터링 시트 체크 및 매수 종목 선정
row_data = shortterm_buy_sheet.get_all_values() # 구글 스프레드시트 '매수모니터링' 시트 데이터 get
# 작성 오류 체크를 위한 주요 항목의 위치(index)를 저장
idx_strategy = row_data[0].index('기본매도전략')
idx_buyprice = row_data[0].index('매수가1')
idx_sellprice = row_data[0].index('목표가')
# DB에서 받아올 종목코드와 시장 컬럼 추가
# 번호, 종목명, 매수모니터링, 비중, 시가위치, 매수가1, 매수가2, 매수가3, 기존매도전략, 목표가
row_data[0].insert(2, '종목코드')
row_data[0].insert(3, '시장')
for row in row_data[1:]:
try:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
except Exception as e:
name = ''
code = ''
market = ''
print('구글 매수모니터링 시트 종목명 오류 : %s' % (row[1]))
logger.error('구글 매수모니터링 시트 오류 : %s' % (row[1]))
Telegram('[XTrader]구글 매수모니터링 시트 오류 : %s' % (row[1]))
row[1] = name # 정상 종목명으로 저장
row.insert(2, code)
row.insert(3, market)
data = pd.DataFrame(data=row_data[1:], columns=row_data[0])
# 사전 데이터 정리
data = data[(data['매수모니터링'] == '1') & (data['종목코드']!= '')]
data = data[row_data[0][:row_data[0].index('목표가')+1]]
del data['매수모니터링']
data.to_csv('%s_googlesheetdata.csv'%(datetime.date.today().strftime('%Y%m%d')), encoding='euc-kr', index=False)
# 2. 매도 모니터링 시트 체크(번호, 종목명, 보유일, 매도전략, 매도가)
row_data = shortterm_sell_sheet.get_all_values() # 구글 스프레드시트 '매도모니터링' 시트 데이터 get
# 작성 오류 체크를 위한 주요 항목의 위치(index)를 저장
idx_holding = row_data[0].index('보유일')
idx_strategy = row_data[0].index('매도전략')
idx_loss = row_data[0].index('손절가')
idx_sellprice = row_data[0].index('목표가')
if len(row_data) > 1:
for row in row_data[1:]:
try:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
if row[idx_holding] == '' : raise Exception('보유일 오류')
if row[idx_strategy] == '': raise Exception('매도전략 오류')
if row[idx_loss] == '': raise Exception('손절가 오류')
if row[idx_strategy] == '4' and row[idx_sellprice] == '': raise Exception('목표가 오류')
except Exception as e:
if str(e) != '보유일 오류' and str(e) != '매도전략 오류' and str(e) != '손절가 오류'and str(e) != '목표가 오류': e = '종목명 오류'
print('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
logger.error('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
Telegram('[XTrader]구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
# print(data)
print('[XTrader]구글 시트 확인 완료')
# Telegram('[XTrader]구글 시트 확인 완료')
# logger.info('[XTrader]구글 시트 확인 완료')
return data
except Exception as e:
# 구글 시트 import error시 에러 없어을 때 백업한 csv 읽어옴
print("import_googlesheet Error : %s"%e)
logger.error("import_googlesheet Error : %s"%e)
backup_file = datetime.date.today().strftime('%Y%m%d') + '_googlesheetdata.csv'
if backup_file in os.listdir():
data = pd.read_csv(backup_file, encoding='euc-kr')
data = data.fillna('')
data = data.astype(str)
data['종목코드'] = data['종목코드'].apply(fix_stockcode)
print("import googlesheet backup_file")
logger.info("import googlesheet backup_file")
return data
# Telegram Setting *****************************************
with open('./secret/telegram_token.txt', mode='r') as tokenfile:
TELEGRAM_TOKEN = tokenfile.readline().strip()
with open('./secret/chatid.txt', mode='r') as chatfile:
CHAT_ID = int(chatfile.readline().strip())
bot = telepot.Bot(TELEGRAM_TOKEN)
with open('./secret/Telegram.txt', mode='r') as tokenfile:
r = tokenfile.read()
TELEGRAM_TOKEN_yoo = r.split('\n')[0].split(', ')[1]
CHAT_ID_yoo = r.split('\n')[1].split(', ')[1]
bot_yoo = telepot.Bot(TELEGRAM_TOKEN_yoo)
telegram_enable = True
def Telegram(str, send='all'):
try:
if telegram_enable == True:
# if send == 'mc':
# bot.sendMessage(CHAT_ID, str)
# else:
# bot.sendMessage(CHAT_ID, str)
# bot_yoo.sendMessage(CHAT_ID_yoo, str)
bot.sendMessage(CHAT_ID, str)
else:
pass
except Exception as e:
Telegram('[StockTrader]Telegram Error : %s' % e, send='mc')
# Slack Setting ***********************************************
# with open('./secret/slack_token.txt', mode='r') as tokenfile:
# SLACK_TOKEN = tokenfile.readline().strip()
# slack = Slacker(SLACK_TOKEN)
# slack_enable = False
# def Slack(str):
# if slack_enable == True:
# slack.chat.post_message('#log', str)
# else:
# pass
# 매수 후 보유기간 계산 *****************************************
today = datetime.date.today()
def holdingcal(base_date, excluded=(6, 7)): # 예시 base_date = '2018-06-23'
yy = int(base_date[:4]) # 연도
mm = int(base_date[5:7]) # 월
dd = int(base_date[8:10]) # 일
base_d = datetime.date(yy, mm, dd)
delta = 0
while base_d <= today:
if base_d.isoweekday() not in excluded:
delta += 1
base_d += datetime.timedelta(days=1)
return delta # 당일도 1일로 계산됨
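def _demo_holdingcal():
    # Illustrative sketch, not part of the original file: counts weekdays from the
    # buy date through today, including the buy date itself; the result depends on
    # when it is run, so only a weak sanity check is possible here.
    held_days = holdingcal(today.strftime('%Y-%m-%d'))
    assert held_days >= 0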
# 호가 계산(상한가, 현재가) *************************************
def hogacal(price, diff, market, option):
# diff 0 : 상한가 호가, -1 : 상한가 -1호가
if option == '현재가':
cal_price = price
elif option == '상한가':
cal_price = price * 1.3
if cal_price < 1000:
hogaunit = 1
elif cal_price < 5000:
hogaunit = 5
elif cal_price < 10000:
hogaunit = 10
elif cal_price < 50000:
hogaunit = 50
elif cal_price < 100000 and market == "KOSPI":
hogaunit = 100
elif cal_price < 500000 and market == "KOSPI":
hogaunit = 500
elif cal_price >= 500000 and market == "KOSPI":
hogaunit = 1000
elif cal_price >= 50000 and market == "KOSDAQ":
hogaunit = 100
cal_price = int(cal_price / hogaunit) * hogaunit + (hogaunit * diff)
return cal_price
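def _demo_hogacal():
    # Illustrative sketch, not part of the original file: a 10,000 KRW KOSPI stock
    # has a 50-won tick below 50,000 KRW, so the +30% limit price snaps to 13,000
    # and the one-tick-lower price to 12,950.
    assert hogacal(10000, 0, 'KOSPI', '상한가') == 13000
    assert hogacal(10000, -1, 'KOSPI', '상한가') == 12950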
# 종목별 현재가 크롤링 ******************************************
def crawler_price(code):
code = code[1:]
url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
tag = soup.find("td", {"class": "num"})
return int(tag.text.replace(',',''))
로봇거래계좌번호 = None
주문딜레이 = 0.25
초당횟수제한 = 5
## 키움증권 제약사항 - 3.7초에 한번 읽으면 지금까지는 괜찮음
주문지연 = 3700 # 3.7초
로봇스크린번호시작 = 9000
로봇스크린번호종료 = 9999
# Table View 데이터 정리
class PandasModel(QtCore.QAbstractTableModel):
def __init__(self, data=None, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
if data is None:
self._data = DataFrame()
def rowCount(self, parent=None):
# return len(self._data.values)
return len(self._data.index)
def columnCount(self, parent=None):
return self._data.columns.size
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
# return QtCore.QVariant(str(self._data.values[index.row()][index.column()]))
return str(self._data.values[index.row()][index.column()])
# return QtCore.QVariant()
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self._data.columns[column]
return int(column + 1)
def update(self, data):
self._data = data
self.reset()
def reset(self):
self.beginResetModel()
# unnecessary call to actually clear data, but recommended by design guidance from Qt docs
# left blank in preliminary testing
self.endResetModel()
def flags(self, index):
return QtCore.Qt.ItemIsEnabled
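# Illustrative sketch, not part of the original file (names are placeholders): the
# model above wraps a DataFrame and is attached to a QTableView inside a running
# Qt application, e.g.
#   model = PandasModel(DataFrame({'종목코드': ['005930'], '현재가': [50000]}))
#   some_table_view.setModel(model)
#   model.update(new_dataframe)  # refresh the view with fresh data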
# 포트폴리오에 사용되는 주식정보 클래스
# TradeShortTerm용 포트폴리오
class CPortStock_ShortTerm(object):
def __init__(self, 번호, 매수일, 종목코드, 종목명, 시장, 매수가, 매수조건, 보유일, 매도전략, 매도구간별조건, 매도구간=1, 매도가=0, 수량=0):
self.번호 = 번호
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.매수조건 = 매수조건
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간별조건 = 매도구간별조건
self.매도구간 = 매도구간
self.매도가 = 매도가
self.수량 = 수량
if self.매도전략 == '2' or self.매도전략 == '3':
self.목표도달 = False # 목표가(매도가) 도달 체크(False 상태로 구간 컷일경우 전량 매도)
self.매도조건 = '' # 구간매도 : B, 목표매도 : T
elif self.매도전략 == '4':
self.sellcount = 0
self.매도단위수량 = 0 # 전략4의 기본 매도 단위는 보유수량의 1/3
self.익절가1도달 = False
self.익절가2도달 = False
self.목표가도달 = False
# TradeLongTerm용 포트폴리오
class CPortStock_LongTerm(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.수량 = 수량
# 기본 로봇용 포트폴리오
class CPortStock(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 보유일, 매도전략, 매도구간=0, 매도전략변경1=False, 매도전략변경2=False, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간 = 매도구간
self.매도전략변경1 = 매도전략변경1
self.매도전략변경2 = 매도전략변경2
self.수량 = 수량
# CTrade 거래로봇용 베이스클래스 : OpenAPI와 붙어서 주문을 내는 등을 하는 클래스
class CTrade(object):
def __init__(self, sName, UUID, kiwoom=None, parent=None):
"""
:param sName: 로봇이름
:param UUID: 로봇구분용 id
:param kiwoom: 키움OpenAPI
:param parent: 나를 부른 부모 - 보통은 메인윈도우
"""
# print("CTrade : __init__")
self.sName = sName
self.UUID = UUID
self.sAccount = None # 거래용계좌번호
self.kiwoom = kiwoom
self.parent = parent
self.running = False # 실행상태
self.portfolio = dict() # 포트폴리오 관리 {'종목코드':종목정보}
self.현재가 = dict() # 각 종목의 현재가
# 조건 검색식 종목 읽기
def GetCodes(self, Index, Name, Type):
logger.info("[%s]조건 검색식 종목 읽기"%(self.sName))
# self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
# self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
# self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
try:
self.getConditionLoad()
print('getload 완료')
print('조건 검색 :', Name, int(Index), Type)
codelist = self.sendCondition("0156", Name, int(Index), Type) # 선정된 검색조건식으로 바로 종목 검색
print('GetCodes :', self.codeList)
return self.codeList
except Exception as e:
print("GetCondition_Error")
print(e)
def getConditionLoad(self):
print('getConditionLoad')
self.kiwoom.dynamicCall("GetConditionLoad()")
# receiveConditionVer() 이벤트 메서드에서 루프 종료
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
def getConditionNameList(self):
print('getConditionNameList')
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
# print(conditionDictionary)
return conditionDictionary
# 조건식 조회
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
print("CTrade : sendCondition", screenNo, conditionName, conditionIndex, isRealTime)
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
screenNo, conditionName, conditionIndex, isRealTime)
# receiveTrCondition() 이벤트 메서드에서 루프 종료
# 실시간 검색일 경우 Loop 미적용해서 바로 조회 등록이 되게 해야됨
# if self.조건검색타입 ==0:
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
# 조건식 조회 중지
def sendConditionStop(self, screenNo, conditionName, conditionIndex):
# print("CTrade : sendConditionStop", screenNo, conditionName, conditionIndex)
isRequest = self.kiwoom.dynamicCall("SendConditionStop(QString, QString, int)",
screenNo, conditionName, conditionIndex)
# 계좌 보유 종목 받음
def InquiryList(self, _repeat=0):
# print("CTrade : InquiryList")
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat, '{:04d}'.format(self.sScreenNo))
self.InquiryLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.InquiryLoop.exec_()
# 금일 매도 종목에 대해서 수익률, 수익금, 수수료 요청(일별종목별실현손익요청)
def DailyProfit(self, 금일매도종목):
_repeat = 0
# self.sAccount = 로봇거래계좌번호
# self.sScreenNo = self.ScreenNumber
시작일자 = datetime.date.today().strftime('%Y%m%d')
cnt = 1
for 종목코드 in 금일매도종목:
# print(self.sScreenNo, 종목코드, 시작일자)
self.update_cnt = len(금일매도종목) - cnt
cnt += 1
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "시작일자", 시작일자)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "일자별종목별실현손익요청", "OPT10072",
_repeat, '{:04d}'.format(self.sScreenNo))
self.DailyProfitLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.DailyProfitLoop.exec_()
# 일별종목별실현손익 응답 결과 구글 업로드
def DailyProfitUpload(self, 매도결과):
# 매도결과 ['종목명','체결량','매입단가','체결가','당일매도손익','손익율','당일매매수수료','당일매매세금']
print(매도결과)
if self.sName == 'TradeShortTerm':
history_sheet = shortterm_history_sheet
history_cols = shortterm_history_cols
elif self.sName == 'TradeCondition':
history_sheet = condition_history_sheet
history_cols = condition_history_cols
try:
code_row = history_sheet.findall(매도결과[0])[-1].row
계산수익률 = round((int(float(매도결과[3])) / int(float(매도결과[2])) - 1) * 100, 2)
cell = alpha_list[history_cols.index('매수가')] + str(code_row) # 매입단가
history_sheet.update_acell(cell, int(float(매도결과[2])))
cell = alpha_list[history_cols.index('매도가')] + str(code_row) # 체결가
history_sheet.update_acell(cell, int(float(매도결과[3])))
cell = alpha_list[history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
history_sheet.update_acell(cell, 계산수익률)
cell = alpha_list[history_cols.index('수익률')] + str(code_row) # 손익율
history_sheet.update_acell(cell, 매도결과[5])
cell = alpha_list[history_cols.index('수익금')] + str(code_row) # 손익율
history_sheet.update_acell(cell, int(float(매도결과[4])))
cell = alpha_list[history_cols.index('세금+수수료')] + str(code_row) # 당일매매수수료 + 당일매매세금
history_sheet.update_acell(cell, int(float(매도결과[6])) + int(float(매도결과[7])))
self.DailyProfitLoop.exit()
if self.update_cnt == 0:
print('금일 실현 손익 구글 업로드 완료')
Telegram("[StockTrader]금일 실현 손익 구글 업로드 완료")
logger.info("[StockTrader]금일 실현 손익 구글 업로드 완료")
except:
self.DailyProfitLoop.exit() # 강제 루프 해제
print('[StockTrader]CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
logger.error('CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
# 포트폴리오의 상태
def GetStatus(self):
# print("CTrade : GetStatus")
try:
result = []
for p, v in self.portfolio.items():
result.append('%s(%s)[P%s/V%s/D%s]' % (v.종목명.strip(), v.종목코드, v.매수가, v.수량, v.매수일))
return [self.__class__.__name__, self.sName, self.UUID, self.sScreenNo, self.running, len(self.portfolio), ','.join(result)]
except Exception as e:
print('CTrade_GetStatus Error', e)
logger.error('CTrade_GetStatus Error : %s' % e)
def GenScreenNO(self):
"""
:return: 키움증권에서 요구하는 스크린번호를 생성
"""
# print("CTrade : GenScreenNO")
self.SmallScreenNumber += 1
if self.SmallScreenNumber > 9999:
self.SmallScreenNumber = 0
return self.sScreenNo * 10000 + self.SmallScreenNumber
def GetLoginInfo(self, tag):
"""
:param tag:
:return: 로그인정보 호출
"""
# print("CTrade : GetLoginInfo")
return self.kiwoom.dynamicCall('GetLoginInfo("%s")' % tag)
def KiwoomConnect(self):
"""
:return: 키움증권OpenAPI의 CallBack에 대응하는 처리함수를 연결
"""
# print("CTrade : KiwoomConnect")
try:
self.kiwoom.OnEventConnect[int].connect(self.OnEventConnect)
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData)
self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData)
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
except Exception as e:
            print("CTrade : [%s]KiwoomConnect Error : %s" % (self.sName, e))
# logger.info("%s : connected" % self.sName)
def KiwoomDisConnect(self):
"""
:return: Callback 연결해제
"""
# print("CTrade : KiwoomDisConnect")
try:
self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect)
except Exception:
pass
try:
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
except Exception:
pass
try:
self.kiwoom.OnReceiveChejanData[str, int, str].disconnect(self.OnReceiveChejanData)
except Exception:
pass
try:
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealData[str, str, str].disconnect(self.OnReceiveRealData)
except Exception:
pass
# logger.info("%s : disconnected" % self.sName)
def KiwoomAccount(self):
"""
:return: 계좌정보를 읽어옴
"""
# print("CTrade : KiwoomAccount")
ACCOUNT_CNT = self.GetLoginInfo('ACCOUNT_CNT')
ACC_NO = self.GetLoginInfo('ACCNO')
self.account = ACC_NO.split(';')[0:-1]
self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.account[0])
self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "d+2예수금요청", "opw00001", 0, '{:04d}'.format(self.sScreenNo))
self.depositLoop = QEventLoop() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
self.depositLoop.exec_()
# logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo):
"""
OpenAPI 메뉴얼 참조
:param sRQName:
:param sScreenNo:
:param sAccNo:
:param nOrderType:
:param sCode:
:param nQty:
:param nPrice:
:param sHogaGb:
:param sOrgOrderNo:
:return:
"""
# print("CTrade : KiwoomSendOrder")
try:
order = self.kiwoom.dynamicCall(
'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)',
[sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo])
return order
except Exception as e:
print('CTrade_KiwoomSendOrder Error ', e)
Telegram('[StockTrader]CTrade_KiwoomSendOrder Error: %s' % e, send='mc')
logger.error('CTrade_KiwoomSendOrder Error : %s' % e)
# -거래구분값 확인(2자리)
#
# 00 : 지정가
# 03 : 시장가
# 05 : 조건부지정가
# 06 : 최유리지정가
# 07 : 최우선지정가
# 10 : 지정가IOC
# 13 : 시장가IOC
# 16 : 최유리IOC
# 20 : 지정가FOK
# 23 : 시장가FOK
# 26 : 최유리FOK
# 61 : 장전 시간외단일가매매
# 81 : 장후 시간외종가
# 62 : 시간외단일가매매
#
# -매매구분값 (1 자리)
# 1 : 신규매수
# 2 : 신규매도
# 3 : 매수취소
# 4 : 매도취소
# 5 : 매수정정
# 6 : 매도정정
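    # Illustrative sketch, not part of the original file (all values below are
    # placeholders): combining the codes above, a limit buy of 10 shares at
    # 50,000 won would look like
    #   self.KiwoomSendOrder('B_demo', '9001', self.sAccount, 1, '005930', 10, 50000, '00', 0)
    # while a market sell ignores the price field (nOrderType=2, nPrice=0, sHogaGb='03').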
def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:param sRealType:
:return:
"""
# print("CTrade : KiwoomSetRealReg")
ret = self.kiwoom.dynamicCall('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10',
sRealType)
return ret
def KiwoomSetRealRemove(self, sScreenNo, sCode):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:return:
"""
# print("CTrade : KiwoomSetRealRemove")
ret = self.kiwoom.dynamicCall('SetRealRemove(QString, QString)', sScreenNo, sCode)
return ret
def OnEventConnect(self, nErrCode):
"""
OpenAPI 메뉴얼 참조
:param nErrCode:
:return:
"""
# print("CTrade : OnEventConnect")
logger.debug('OnEventConnect', nErrCode)
def OnReceiveMsg(self, sScrNo, sRQName, sTRCode, sMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sMsg:
:return:
"""
# print("CTrade : OnReceiveMsg")
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTRCode, sMsg))
# self.InquiryLoop.exit()
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sRecordName:
:param sPreNext:
:param nDataLength:
:param sErrorCode:
:param sMessage:
:param sSPlmMsg:
:return:
"""
# print('CTrade : OnReceiveTrData')
try:
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo[:4]):
return
if 'B_' in sRQName or 'S_' in sRQName:
주문번호 = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, 0, "주문번호")
# logger.debug("화면번호: %s sRQName : %s 주문번호: %s" % (sScrNo, sRQName, 주문번호))
self.주문등록(sRQName, 주문번호)
if sRQName == "d+2예수금요청":
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)',sTRCode, "", sRQName, 0, "d+2추정예수금")
# 입력된 문자열에 대해 lstrip 메서드를 통해 문자열 왼쪽에 존재하는 '-' 또는 '0'을 제거. 그리고 format 함수를 통해 천의 자리마다 콤마를 추가한 문자열로 변경
strip_data = data.lstrip('-0')
if strip_data == '':
strip_data = '0'
format_data = format(int(strip_data), ',d')
if data.startswith('-'):
format_data = '-' + format_data
self.sAsset = format_data
self.depositLoop.exit() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
if sRQName == "계좌평가잔고내역요청":
print("계좌평가잔고내역요청_수신")
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
self.CList = []
for i in range(0, cnt):
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, '종목번호').strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
S = self.종목코드변환(S) # 종목코드 맨 첫 'A'를 삭제하기 위함
self.CList.append(S)
# logger.debug("%s" % row)
if sPreNext == '2':
self.remained_data = True
self.InquiryList(_repeat=2)
else:
self.remained_data = False
print(self.CList)
self.InquiryLoop.exit()
if sRQName == "일자별종목별실현손익요청":
try:
data_idx = ['종목명', '체결량', '매입단가', '체결가', '당일매도손익', '손익율', '당일매매수수료', '당일매매세금']
result = []
for idx in data_idx:
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode,
"",
sRQName, 0, idx)
result.append(data.strip())
self.DailyProfitUpload(result)
except Exception as e:
print(e)
logger.error('일자별종목별실현손익요청 Error : %s' % e)
except Exception as e:
print('CTrade_OnReceiveTrData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveTrData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveTrData Error : %s' % e)
def OnReceiveChejanData(self, sGubun, nItemCnt, sFidList):
"""
OpenAPI 메뉴얼 참조
:param sGubun:
:param nItemCnt:
:param sFidList:
:return:
"""
# logger.debug('OnReceiveChejanData [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
# 주문체결시 순서
# 1 구분:0 GetChejanData(913) = '접수'
# 2 구분:0 GetChejanData(913) = '체결'
# 3 구분:1 잔고정보
"""
# sFid별 주요데이터는 다음과 같습니다.
# "9201" : "계좌번호"
# "9203" : "주문번호"
# "9001" : "종목코드"
# "913" : "주문상태"
# "302" : "종목명"
# "900" : "주문수량"
# "901" : "주문가격"
# "902" : "미체결수량"
# "903" : "체결누계금액"
# "904" : "원주문번호"
# "905" : "주문구분"
# "906" : "매매구분"
# "907" : "매도수구분"
# "908" : "주문/체결시간"
# "909" : "체결번호"
# "910" : "체결가"
# "911" : "체결량"
# "10" : "현재가"
# "27" : "(최우선)매도호가"
# "28" : "(최우선)매수호가"
# "914" : "단위체결가"
# "915" : "단위체결량"
# "919" : "거부사유"
# "920" : "화면번호"
# "917" : "신용구분"
# "916" : "대출일"
# "930" : "보유수량"
# "931" : "매입단가"
# "932" : "총매입가"
# "933" : "주문가능수량"
# "945" : "당일순매수수량"
# "946" : "매도/매수구분"
# "950" : "당일총매도손일"
# "951" : "예수금"
# "307" : "기준가"
# "8019" : "손익율"
# "957" : "신용금액"
# "958" : "신용이자"
# "918" : "만기일"
# "990" : "당일실현손익(유가)"
# "991" : "당일실현손익률(유가)"
# "992" : "당일실현손익(신용)"
# "993" : "당일실현손익률(신용)"
# "397" : "파생상품거래단위"
# "305" : "상한가"
# "306" : "하한가"
"""
# print("CTrade : OnReceiveChejanData")
try:
# 접수
if sGubun == "0":
# logger.debug('OnReceiveChejanData: 접수 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
화면번호 = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
if len(화면번호.replace(' ','')) == 0 : # 로봇 실행중 영웅문으로 주문 발생 시 화면번호가 ' '로 들어와 에러발생함 방지
print('다른 프로그램을 통한 거래 발생')
Telegram('다른 프로그램을 통한 거래 발생', send='mc')
logger.info('다른 프로그램을 통한 거래 발생')
return
elif self.sScreenNo != int(화면번호[:4]):
return
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9203)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['주문업무분류'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 912)
# 접수 / 체결 확인
# 주문상태(10:원주문, 11:정정주문, 12:취소주문, 20:주문확인, 21:정정확인, 22:취소확인, 90-92:주문거부)
param['주문상태'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 913) # 접수 or 체결 확인
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['주문수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 900)
param['주문가격'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 901)
param['미체결수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 902)
param['체결누계금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 903)
param['원주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 904)
param['주문구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 905)
param['매매구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 906)
param['매도수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 907)
param['체결시간'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 908)
param['체결번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 909)
param['체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 910)
param['체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 911)
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['단위체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 914).strip()
param['단위체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 915)
param['화면번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
param['당일매매수수료'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 938)
param['당일매매세금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 939)
param['체결수량'] = int(param['주문수량']) - int(param['미체결수량'])
logger.debug('접수 - 주문상태:{주문상태} 계좌번호:{계좌번호} 체결시간:{체결시간} 주문번호:{주문번호} 체결번호:{체결번호} 종목코드:{종목코드} 종목명:{종목명} 체결량:{체결량} 체결가:{체결가} 단위체결가:{단위체결가} 주문수량:{주문수량} 체결수량:{체결수량} 단위체결량:{단위체결량} 미체결수량:{미체결수량} 당일매매수수료:{당일매매수수료} 당일매매세금:{당일매매세금}'.format(**param))
# if param["주문상태"] == "접수":
# self.접수처리(param)
# if param["주문상태"] == "체결": # 매도의 경우 체결로 안들어옴
# self.체결처리(param)
self.체결처리(param)
# 잔고통보
if sGubun == "1":
# logger.debug('OnReceiveChejanData: 잔고통보 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['신용구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 917)
param['대출일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 916)
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['보유수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 930)
param['매입단가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 931)
param['총매입가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 932)
param['주문가능수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 933)
param['당일순매수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 945)
param['매도매수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 946)
param['당일총매도손익'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 950)
param['예수금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 951)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['기준가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 307)
param['손익율'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 8019)
param['신용금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 957)
param['신용이자'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 958)
param['만기일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 918)
param['당일실현손익_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 990)
param['당일실현손익률_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 991)
param['당일실현손익_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 992)
param['당일실현손익률_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 993)
param['담보대출수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 959)
logger.debug('잔고통보 - 계좌번호:{계좌번호} 종목명:{종목명} 보유수량:{보유수량} 매입단가:{매입단가} 총매입가:{총매입가} 손익율:{손익율} 당일총매도손익:{당일총매도손익} 당일순매수량:{당일순매수량}'.format(**param))
self.잔고처리(param)
# 특이신호
if sGubun == "3":
logger.debug('OnReceiveChejanData: 특이신호 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
pass
except Exception as e:
print('CTrade_OnReceiveChejanData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveChejanData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveChejanData Error : %s' % e)
def OnReceiveRealData(self, sRealKey, sRealType, sRealData):
"""
OpenAPI 메뉴얼 참조
:param sRealKey:
:param sRealType:
:param sRealData:
:return:
"""
# logger.debug('OnReceiveRealData [%s] [%s] [%s]' % (sRealKey, sRealType, sRealData))
_now = datetime.datetime.now()
try:
if _now.strftime('%H:%M:%S') < '09:00:00': # 9시 이전 데이터 버림(장 시작 전에 테이터 들어오는 것도 많으므로 버리기 위함)
return
if sRealKey not in self.실시간종목리스트: # 리스트에 없는 데이터 버림
return
if sRealType == "주식시세" or sRealType == "주식체결":
param = dict()
param['종목코드'] = self.종목코드변환(sRealKey)
param['체결시간'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 20).strip()
param['현재가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 10).strip()
param['전일대비'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 11).strip()
param['등락률'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 12).strip()
param['매도호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 27).strip()
param['매수호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 28).strip()
param['누적거래량'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 13).strip()
param['시가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 16).strip()
param['고가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 17).strip()
param['저가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 18).strip()
param['거래회전율'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 31).strip()
param['시가총액'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 311).strip()
self.실시간데이터처리(param)
except Exception as e:
print('CTrade_OnReceiveRealData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveRealData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveRealData Error : %s' % e)
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
print('OnReceiveTrCondition')
try:
if strCodeList == "":
self.ConditionLoop.exit()
return []
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print(self.codeList)
logger.info("[%s]조건 검색 완료"%(self.sName))
self.ConditionLoop.exit()
print('OnReceiveTrCondition :', self.codeList)
return self.codeList
except Exception as e:
print("OnReceiveTrCondition_Error")
print(e)
def OnReceiveConditionVer(self, lRet, sMsg):
print('OnReceiveConditionVer')
try:
self.condition = self.getConditionNameList()
except Exception as e:
print("CTrade : OnReceiveConditionVer_Error")
finally:
self.ConditionLoop.exit()
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
# print("CTrade : OnReceiveRealCondition")
# OpenAPI 메뉴얼 참조
# :param sTrCode:
# :param strType:
# :param strConditionName:
# :param strConditionIndex:
# :return:
_now = datetime.datetime.now().strftime('%H:%M:%S')
if (_now >= '10:00:00' and _now < '13:00:00') or _now >= '15:17:00': # 10시부터 13시 이전 데이터 버림, 15시 17분 당일 매도 처리 후 데이터 버림
return
# logger.info('OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
print("실시간조검검색_종목코드: %s %s / Time : %s"%(sTrCode, "종목편입" if strType == "I" else "종목이탈", _now))
if strType == 'I':
self.실시간조건처리(sTrCode)
def 종목코드변환(self, code): # TR 통해서 받은 종목 코드에 A가 붙을 경우 삭제
return code.replace('A', '')
def 정량매수(self, sRQName, 종목코드, 매수가, 수량):
# sRQName = '정량매수%s' % self.sScreenNo
sScreenNo = self.GenScreenNO() # 주문을 낼때 마다 스크린번호를 생성
sAccNo = self.sAccount
nOrderType = 1 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매수가
sHogaGb = self.매수방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
return ret
def 정액매수(self, sRQName, 종목코드, 매수가, 매수금액):
# sRQName = '정액매수%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 1 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 매수금액 // 매수가
nPrice = 매수가
sHogaGb = self.매수방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
# logger.debug('주문 - %s %s %s %s %s %s %s %s %s', sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
print('CTrade_정액매수 Error ', e)
Telegram('[StockTrader]CTrade_정액매수 Error : %s' % e, send='mc')
logger.error('CTrade_정액매수 Error : %s' % e)
def 정량매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정량매도%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
            print('[%s]정량매도 Error : %s' % (self.sName, e))
Telegram('[StockTrader][%s]정량매도 Error : %s' % (self.sName, e), send='mc')
logger.error('[%s]정량매도 Error : %s' % (self.sName, e))
def 정액매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정액매도%s' % self.sScreenNo
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
def 주문등록(self, sRQName, 주문번호):
self.주문번호_주문_매핑[주문번호] = sRQName
Ui_계좌정보조회, QtBaseClass_계좌정보조회 = uic.loadUiType("./UI/계좌정보조회.ui")
class 화면_계좌정보(QDialog, Ui_계좌정보조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_계좌정보, self).__init__(parent) # Initialize하는 형식
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량']
self.보이는컬럼 = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량'] # 주당 손익 -> 수익률(%)
self.result = []
self.KiwoomAccount()
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def KiwoomAccount(self):
ACCOUNT_CNT = self.kiwoom.dynamicCall('GetLoginInfo("ACCOUNT_CNT")')
ACC_NO = self.kiwoom.dynamicCall('GetLoginInfo("ACCNO")')
self.account = ACC_NO.split(';')[0:-1] # 계좌번호가 ;가 붙어서 나옴(에로 계좌가 3개면 111;222;333)
self.comboBox.clear()
self.comboBox.addItems(self.account)
logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (
sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if sRQName == "계좌평가잔고내역요청":
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
# print(j)
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# logger.debug("%s" % row)
if sPreNext == '2':
self.Request(_repeat=2)
else:
self.model.update(DataFrame(data=self.result, columns=self.보이는컬럼))
print(self.result)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
계좌번호 = self.comboBox.currentText().strip()
logger.debug("계좌번호 %s" % 계좌번호)
# KOA StudioSA에서 opw00018 확인
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", 계좌번호) # 8132495511
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat,'{:04d}'.format(self.sScreenNo))
# 조회 버튼(QtDesigner에서 조회버튼 누르고 오른쪽 하단에 시그널/슬롯편집기를 보면 조회버튼 시그널(clicked), 슬롯(Inquiry())로 확인가능함
def inquiry(self):
self.result = []
self.Request(_repeat=0)
def robot_account(self):
global 로봇거래계좌번호
로봇거래계좌번호 = self.comboBox.currentText().strip()
# sqlite3 사용
try:
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
robot_account = pickle.dumps(로봇거래계좌번호, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=True)
_robot_account = base64.encodebytes(robot_account)
cursor.execute("REPLACE into Setting(keyword, value) values (?, ?)",
['robotaccount', _robot_account])
conn.commit()
print("로봇 계좌 등록 완료")
except Exception as e:
print('robot_account', e)
Ui_일자별주가조회, QtBaseClass_일자별주가조회 = uic.loadUiType("./UI/일자별주가조회.ui")
class 화면_일별주가(QDialog, Ui_일자별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_일별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('일자별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['일자', '현재가', '거래량', '시가', '고가', '저가', '거래대금']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식일봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식일봉차트조회", "OPT10081", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_분별주가조회, QtBaseClass_분별주가조회 = uic.loadUiType("./UI/분별주가조회.ui")
class 화면_분별주가(QDialog, Ui_분별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_분별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('분별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['체결시간', '현재가', '시가', '고가', '저가', '거래량']
self.result = []
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
print('화면_분별주가 : OnReceiveTrData')
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식분봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and (S[0] == '-' or S[0] == '+'):
S = S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# df = DataFrame(data=self.result, columns=self.columns)
# df.to_csv('분봉.csv', encoding='euc-kr')
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df.to_csv('분봉.csv', encoding='euc-kr', index=False)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
틱범위 = self.comboBox_min.currentText()[0:2].strip()
if 틱범위[0] == '0':
틱범위 = 틱범위[1:]
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "틱범위", 틱범위)
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식분봉차트조회", "OPT10080", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_업종정보, QtBaseClass_업종정보 = uic.loadUiType("./UI/업종정보조회.ui")
class 화면_업종정보(QDialog, Ui_업종정보):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_업종정보, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('업종정보 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명', '현재가', '대비기호', '전일대비', '등락률', '거래량', '비중', '거래대금', '상한', '상승', '보합', '하락', '하한',
'상장종목수']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage,
sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "업종정보조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['업종코드'] = self.업종코드
df.to_csv("업종정보.csv")
self.model.update(df[['업종코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.업종코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "업종코드", self.업종코드)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "업종정보조회", "OPT20003", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_업종별주가조회, QtBaseClass_업종별주가조회 = uic.loadUiType("./UI/업종별주가조회.ui")
class 화면_업종별주가(QDialog, Ui_업종별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_업종별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('업종별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['현재가', '거래량', '일자', '시가', '고가', '저가', '거래대금', '대업종구분', '소업종구분', '종목정보', '수정주가이벤트', '전일종가']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage,
sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "업종일봉조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = | DataFrame(data=self.result, columns=self.columns) | pandas.DataFrame |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library for analyzing benchmark results for Instant start."""
import pandas
from scipy import stats
def print_report(runs, model, control='control', experiment='experiment'):
"""Print stats of A/B testing"""
all_df = pandas.DataFrame(runs, dtype=float)
report = pandas.DataFrame(
columns=['Median', 'Diff with control', 'p-value'])
for metric in sorted(set(all_df['metric_name'])):
mdf = all_df[all_df['metric_name'] == metric]
df = pandas.DataFrame()
for variant in sorted(set(all_df['variant_name'])):
df[variant] = mdf[mdf['variant_name'] == variant]\
.value.reset_index(drop=True)
diff_df = df[experiment] - df[control]
n = len(diff_df)
row = {}
row['Median'] = '%.1fms' % df[experiment].median()
row['Diff with control'] = '%.1fms (%.2f%%)' % (
diff_df.median(), diff_df.median() / df[experiment].median() * 100)
row['p-value'] = '%f' % (stats.ttest_rel(df[experiment],
df[control])[1])
report = report.append( | pandas.Series(data=row, name=metric) | pandas.Series |
from abc import ABCMeta, abstractmethod
import pandas as pd
import logging
| pd.set_option('precision', 10) | pandas.set_option |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
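# Illustrative usage (added; not part of the original module). Assumes numpy is
# imported as np, as above; exact reprs may vary slightly across versions.
#   >>> maybe_box_datetimelike(np.datetime64("2021-01-01"))
#   Timestamp('2021-01-01 00:00:00')
#   >>> maybe_box_datetimelike(np.timedelta64(1, "D"))
#   Timedelta('1 days 00:00:00')
#   >>> maybe_box_datetimelike(np.datetime64("2021-01-01"), dtype=object)
#   numpy.datetime64('2021-01-01')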
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int,
or it could be an astype of float64->float32)
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period'
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
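# Illustrative usage (added; not part of the original module): a boolean sum is
# widened so the result does not overflow, other combinations pass through.
#   >>> maybe_cast_result_dtype(np.dtype(bool), "sum")
#   dtype('int64')
#   >>> maybe_cast_result_dtype(np.dtype(np.float64), "mean")
#   dtype('float64')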
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
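# Illustrative usage (added; not part of the original module). Exact reprs may
# differ slightly by numpy version.
#   >>> maybe_promote(np.dtype(np.int64), fill_value=np.nan)
#   (dtype('float64'), nan)
#   >>> maybe_promote(np.dtype(np.bool_), fill_value=3)
#   (dtype('O'), 3)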
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if our dtype is np.complex64, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array that belongs to a pandas extension type is
inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar that belongs to a pandas extension type is inferred
as object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!)
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
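# Illustrative usage (added; not part of the original module), assuming the
# imports at the top of this file:
#   >>> infer_dtype_from_scalar(1)
#   (dtype('int64'), 1)
#   >>> infer_dtype_from_scalar(Timestamp("2016-01-01", tz="UTC"), pandas_dtype=True)
#   (datetime64[ns, UTC], 1451606400000000000)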
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
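# Illustrative usage (added; not part of the original module):
#   >>> dict_compat({np.datetime64("2021-01-01"): 1})
#   {Timestamp('2021-01-01 00:00:00'): 1}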
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array that belongs to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
If pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
If pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : bool, default True
If True always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
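# Illustrative usage (added; not part of the original module): three categories
# fit comfortably in int8, so the indexer is downcast.
#   >>> coerce_indexer_dtype(np.array([0, 1, 2], dtype=np.int64), ["a", "b", "c"]).dtype
#   dtype('int8')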
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == TD64NS_DTYPE:
return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
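# Illustrative usage (added; not part of the original module): finite floats
# cast cleanly, while NaN cannot be represented in an integer dtype.
#   >>> astype_nansafe(np.array([1.0, 2.0]), np.dtype(np.int64))
#   array([1, 2])
#   >>> astype_nansafe(np.array([1.0, np.nan]), np.dtype(np.int64))
#   Traceback (most recent call last):
#       ...
#   ValueError: Cannot convert non-finite values (NA or inf) to integer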
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime:
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
except (ValueError, TypeError):
pass
else:
# If all NaNs, then do not-alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
return values
def convert_dtypes(
input_array: AnyArrayLike,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
to types supporting ``pd.NA``.
Parameters
----------
input_array : ExtensionArray, Index, Series or np.ndarray
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtypes()``.
convert_floating : bool, default True
Whether, if possible, conversion can be done to floating extension types.
If `convert_integer` is also True, preference will be given to integer
dtypes if the floats can be faithfully cast to integers.
Returns
-------
dtype
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
if (
convert_string or convert_integer or convert_boolean or convert_floating
) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
# Required to catch due to Period. Can remove once GH 23553 is fixed
inferred_dtype = input_array.dtype
if not convert_string and is_string_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_integer:
target_int_dtype = "Int64"
if is_integer_dtype(input_array.dtype):
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
inferred_dtype = INT_STR_TO_DTYPE.get(
input_array.dtype.name, target_int_dtype
)
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
inferred_dtype = target_int_dtype
else:
if is_integer_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_floating:
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(
input_array.dtype.name, "Float64"
)
# if we could also convert to integer, check if all floats
# are actually integers
if convert_integer:
arr = input_array[notna(input_array)]
if (arr.astype(int) == arr).all():
inferred_dtype = "Int64"
else:
inferred_dtype = inferred_float_dtype
else:
inferred_dtype = inferred_float_dtype
else:
if is_float_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_boolean:
if is_bool_dtype(input_array.dtype):
inferred_dtype = "boolean"
else:
if isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
inferred_dtype = input_array.dtype
else:
inferred_dtype = input_array.dtype
return inferred_dtype
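# Illustrative usage (added; not part of the original module), assuming pandas
# is importable as pd. Note that this version returns either a dtype object or
# a string alias depending on the path taken.
#   >>> convert_dtypes(pd.Series([1, 2, 3]))
#   Int64Dtype()
#   >>> convert_dtypes(pd.Series([1.0, 2.0]))
#   'Int64'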
def maybe_castable(arr: np.ndarray) -> bool:
# return False to force a non-fastpath
assert isinstance(arr, np.ndarray) # GH 37024
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == "M":
return is_datetime64_ns_dtype(arr.dtype)
elif kind == "m":
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(
value: Union[ArrayLike, Scalar], convert_dates: bool = False
):
"""
we might have an array (or a single object) that is datetime-like;
if no dtype is passed, don't change the value unless we find a
datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : bool, default False
if True, try really hard to convert dates (such as datetime.date); otherwise
leave the inferred dtype 'date' alone
"""
# TODO: why not timedelta?
if isinstance(
value, (ABCDatetimeIndex, ABCPeriodIndex, ABCDatetimeArray, ABCPeriodArray)
):
return value
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if v.ndim != 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v, require_iso8601=True, errors="raise")[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
from pandas import DatetimeIndex
try:
values, tz = | conversion.datetime_to_datetime64(v) | pandas._libs.tslibs.conversion.datetime_to_datetime64 |
import os
import time
import pandas as pd
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
def straat2coord(file_path: str, woonplaats: str, woonplaats_header: str, adres_header: str, sep: str = ";") -> None:
"""Berekend aan de hand van een CSV-bestand de breedte- en hoogtegraad.
Resultaten worden opgeslagen in een nieuw CSV-bestand `data/geoDataKDV.csv`.
Als input wordt om een woonplaats gevraagd. Alle punten die aan de waarde 'woonplaats voldoen'
in de kolom 'woonplaatsHeader' worden geimporteerd.
De breedte- en lengtegraad van de waardes die zich bevinden in de kolom 'adresHeader' worden opgevraagd.
Duplicaten worden direct overgeslagen.
:param file_path: totale path naar het te converteren bestand
:param woonplaats: woonplaats waar een selectie uit (landelijke) data word gehaald
:param woonplaats_header: kolom waar de waarde `woonplaats` zich in bevind
:param adres_header: kolom met de adressen die omgezet mooeten worden. Idealieter adres + huisnummer
:param sep: separator voor CSV-bestand, standaard ';' maar kan ook ',' of iets anders zijn
:returns: Geconverteerd bestand opgeslagen in data/output/. Bestand bevat de headers latitude en longitude
"""
if not isinstance(file_path, str) or not isinstance(woonplaats, str) \
or not isinstance(woonplaats_header, str) or not isinstance(adres_header, str):
raise ValueError("Verkeerde waardes meegegeven als argumenten")
print("Even geduld a.u.b, dit kan even duren...")
csv_data = pd.read_csv(file_path, sep=sep)  # read the data from the file
subset = csv_data.loc[csv_data[woonplaats_header] == woonplaats]  # select the matching subset of the data
geo_locator = Nominatim(user_agent="IPASS Project - <NAME> 2019")  # geocoder used for the API calls
geo_locaties = | pd.DataFrame(columns=["latitude", "longitude"]) | pandas.DataFrame |
"""
Created on Sat Oct 20 2018
Modules for extracting data from the Chronos database
@author: <NAME>
"""
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from context import dbl
# import DataBaseLibrary as dbl
#from pydream.core import run_dream
#from pydream.parameters import SampledParam
#from pydream.convergence import Gelman_Rubin
#import inspect
# Meteorological data will be obtained from two sources:
# 1: a close by weather station (for WM Berkhout, for BB: Lelystad)
# we will use the evapotranspiration data obtained from the weather station...
# 2: rainfall from the 1km gridded radar corrected interpolated rainfall data obtained
# from climate4impact...
# surface areas of Kragge compartment 3 and 4
topArea = np.array([57593,61012]) #m2
baseArea = np.array([56137, 58206]) # m2
baseLevel = np.array([5.04, 5.01]) # m
fCrop = 1 # optimized crop factor by Loys Vermeijden (2018)
weather_station = '350'
pklfile = 'meteoGilzeRijen.pkl'
t_range = ['20030101','20210101']
pklfile = './DataFiles/meteoKR.bz2'
#path = './MeteoData/WM_Rain_2008-2019.bz2'
# Read data from close by meteo station
meteo_data_stat = dbl.download_meteoKNMI (t_range, weather_station, pklfile)
meteo_data_stat = meteo_data_stat.rename(columns={'rain':'rain_station'})
# Read data from gridded radar corrected interpolated rainfall data
# rain_radar = pd.read_pickle(fpath, compression='infer')
# transform rain values from kg/m2 (mm) to m water column
# rain_radar['rain'] = rain_radar['rain']/1e3
# Merge the station data and the interpolated rain data into a single dataframe
meteo_data = meteo_data_stat
# meteo_data is top boundary condition. We run the model from 2003 onward
meteo_data = meteo_data[slice('2003-01-01','2021-01-01')]
pump_code = 'CF013'
pklfile = './DataFiles/flowdata_CF013.bz2'
measFreq = 1
trange = pd.date_range(start='2003-01-01',end = '2020-12-31',freq='D')
tmeas = pd.date_range(start='2012-06-12',end = '2020-12-31',freq='D')
tcalib1 = | pd.date_range(start='2012-06-12',end = '2017-03-03',freq='D') | pandas.date_range |
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_handle_end_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
malformed_C = np.random.binomial(2, p=0.8) # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
assert_frame_equal(observedw, observed)
| assert_frame_equal(censoredw, censored) | pandas.testing.assert_frame_equal |
import pandas as pd
import numpy as np
def combineData(df_list):
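# Concatenates every frame in df_list row-wise, forward-fills gaps and drops
# exact duplicate rows. (DataFrame.append is deprecated in pandas >= 1.4;
# pd.concat(df_list) would be the modern equivalent.)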
combined = pd.DataFrame()
for df in df_list:
combined = combined.append(df)
combined = combined.fillna(method='ffill').drop_duplicates()
return combined
def modifyWeapons(df):
modified = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
dist = operations.Distribution(['grp', 'platform'], sum_x)
output = dist.compute_on(df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 0.5, 0.25, 0.25],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_melted(self):
output = self.distribution.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.25, 0.75],
'grp': ['A', 'B'],
'Metric': ['Distribution of sum(X)', 'Distribution of sum(X)']
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby(self):
output = self.distribution.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_melted(self):
output = self.distribution.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'Metric': ['Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 1, 2, 3.5],
'grp': ['A', 'A', 'B', 'B'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.distribution.compute_on(df, ['grp0', 'country'])
bar = self.distribution.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.distribution.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
testing.assert_frame_equal(output, expected)
def test_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.Distribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Distribution of sum(X)': [0.25, 0.75],
'Distribution of count(X)': [0.5, 0.5]
},
index=['A', 'B'],
columns=['Distribution of sum(X)', 'Distribution of count(X)'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_where(self):
metric = operations.Distribution('grp', self.sum_x, where='country == "US"')
metric_no_filter = operations.Distribution('grp', self.sum_x)
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_distribution_pipeline(self):
output = self.sum_x | operations.Distribution('grp') | metrics.compute_on(
self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.Distribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_distribution_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class CumulativeDistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
metric = operations.CumulativeDistribution('grp', sum_x)
def test_cumulative_distribution(self):
output = self.metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
cum_dict = operations.CumulativeDistribution(['grp', 'platform'], sum_x)
output = cum_dict.compute_on(df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 0.5, 0.75, 1],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_melted(self):
output = self.metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.75, 1.],
'grp': ['A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 2
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby(self):
output = self.metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_melted(self):
output = self.metric.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 2, 1.5, 3],
'grp': ['B', 'B', 'A', 'A'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.metric.compute_on(df, ['grp0', 'country'])
output.sort_index(level=['grp0', 'grp', 'country'], inplace=True)
bar = self.metric.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.metric.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected = expected.sort_index(level=['grp0', 'grp', 'country'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_ascending(self):
metric = operations.CumulativeDistribution(
'grp', self.sum_x, ascending=False)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order_splitby(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 2. / 3, 1.],
'grp': ['A', 'B', 'A'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.CumulativeDistribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Cumulative Distribution of sum(X)': [0.75, 1.],
'Cumulative Distribution of count(X)': [0.5, 1.]
},
index=['A', 'B'],
columns=[
'Cumulative Distribution of sum(X)',
'Cumulative Distribution of count(X)'
])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_where(self):
metric = operations.CumulativeDistribution(
'grp', metrics.Count('X'), where='country == "US"')
metric_no_filter = operations.CumulativeDistribution(
'grp', metrics.Count('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_pipeline(self):
output = self.sum_x | operations.CumulativeDistribution(
'grp') | metrics.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.CumulativeDistribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_cumulative_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_cumulative_distribution_with_jackknife_internal_caching_cleaned_up(
self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class PercentChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_percent_change(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0., 0.], [150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [150., 0.],
'Metric': ['sum(X) Percent Change', 'count(X) Percent Change'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
| testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
import xlrd
import os
import warnings
import numpy as np
from scipy import stats
import datetime
from scipy.signal import argrelextrema
from sklearn.linear_model import LinearRegression
from side_functions import *
class MSData(object):
def __init__(self, filename=None, filetype=None, instrument=None):
"""
object holding LA-ICP-MS data for data reduction
:param filename: str name of the file of measured MS data
`:param filetype: str type of the file ['csv', 'xlsx', 'asc']
:param instrument: str type of MS instrument used ['Element', 'Agilent']
"""
if filename:
if instrument == 'Element':
skipfooter = 4
header = 1
drop = 9
elif instrument == 'Agilent':
skipfooter = 4
header = 3
drop = 3
else:
skipfooter = 0
header = 0
drop = 0
if filetype == 'xlsx':
pwd = os.getcwd()
os.chdir(os.path.dirname(filename))
self.imported = pd.ExcelFile(filename)
self.data = self.imported.parse(
0, index_col=0, skipfooter=skipfooter, header=header)
self.data = self.data.drop(self.data.index[:drop], axis=0)
os.chdir(pwd)
# TODO xlsx doesnt work with agilent type
elif filetype == 'csv':
pwd = os.getcwd()
os.chdir(os.path.dirname(filename))
self.data = pd.read_csv(filename, sep=',', index_col=0, skipfooter=skipfooter,
header=header, engine='python')
os.chdir(pwd)
elif filetype == 'asc':
pwd = os.getcwd()
os.chdir(os.path.dirname(filename))
self.data = pd.read_csv(filename, sep='\t', index_col=0, skipfooter=skipfooter,
header=header, engine='python')
self.data = self.data.drop(self.data.index[:drop], axis=0)
self.data.dropna(axis=1, how='all', inplace=True)
self.data = self.data.apply(pd.to_numeric, errors='coerce')
os.chdir(pwd)
else:
warnings.warn('File type not supported.')
self.data.index = self.data.index.astype('float32')
self.time = self.data.index
self.elements = list(map(elem_resolution, self.data.columns))
self.data.columns = self.elements
self.srms = pd.ExcelFile('./SRM.xlsx').parse(index_col=0)
self.sum_koeficients = pd.ExcelFile(
'./default_sum_koef.xlsx').parse(0, index_col=0, header=None).to_dict()[1]
self.srm = None
self.iolite = None
self.names = None
self.internal_std = None
self.ablation_time = None
self.laser_off = []
self.laser_on = []
self.skip = {'bcg_start': 0,
'bcg_end': 0,
'sample_start': 0,
'sample_end': 0} # time in seconds to skip from each bcg and sample
self.filter_line = None
self.starts = None
self.ends = None
self.bcg = None
self.average_peaks = None
self.ratio = None
self.quantified = None
self.lod = None
self.correction_elements = None
self.corrected_IS = None
self.corrected_SO = None
self.dx = None
self.dy = None
self.maps = {}
self.qmaps = {}
self.regression_values = {}
self.regression_equations = {}
def read_param(self, path):
xl = pd.ExcelFile(path)
if 'names' in xl.sheet_names:
self.names = list(xl.parse('names', header=None)[0])
if 'internal standard' in xl.sheet_names:
self.internal_std = xl.parse(
'internal standard', index_col=0, header=0)
if 'total sum' in xl.sheet_names:
self.sum_koeficients = xl.parse(
'total sum', index_col=0, header=None).to_dict()[1]
def read_iolite(self, path):
pwd = os.getcwd()
os.chdir(os.path.dirname(path))
self.iolite = pd.read_csv(path, sep=",", engine='python')
os.chdir(pwd)
print(names_from_iolite(self.iolite))
def plot_data(self, ax=None):
if ax is None:
fig, ax = plt.subplot()
ax.plot(self.data[self.laser_on, :])
ax.show()
def set_filtering_element(self, element):
if element == 'sum':
self.filter_line = self.data.sum(1)
elif element in self.elements:
self.filter_line = self.data[element]
else:
warnings.warn(
'Element selected for filtering laser ON not in measured elements. Falling back to sum.')
self.filter_line = self.data.sum(1)
def set_ablation_time(self, time):
# set time of ablation spot/line in seconds
self.ablation_time = time
def set_skip(self, bcg_s=None, bcg_e=None, sig_s=None, sig_e=None):
# set time skipped on start and end of background and ablation in seconds
if bcg_s is not None:
self.skip['bcg_start'] = bcg_s
if bcg_e is not None:
self.skip['bcg_end'] = bcg_e
if sig_s is not None:
self.skip['sample_start'] = sig_s
if sig_e is not None:
self.skip['sample_end'] = sig_e
def time_to_number(self, time):
"""
takes time in seconds returns number of measured values
depends on integration time of MS method
"""
val = len(self.time[0:(
np.abs(np.array(self.time.values, dtype=np.float32)-np.abs(time))).argmin()])
if time < 0:
val = -val
return val
def create_selector_iolite(self, start):
# select starts and ends of ablation using iolite file
if self.iolite.empty:
print('Warning: Iolite not created.')
return
lst = [x for x in self.iolite.loc[:6,
' Comment'] if isinstance(x, str)]
start = self.time[get_index(self.data, start)]
if len(lst) == 2:
print('>>> Selecting spots.')
difflst = get_diff_lst(self.iolite)
elif len(lst) == 1:
print('>>> Selecting lines.')
difflst = get_diff_lst_line(self.iolite)
else:
print('Warning: Iolite not selected.')
timeindex = []
for i in range(0, len(difflst)+1):
timeindex.append(sum(difflst[:i])+start)
index = [get_index(self.data, x) for x in timeindex]
self.starts = [index[i] for i in range(len(index)) if i % 2 == 0]
self.ends = [index[i] for i in range(len(index)) if i % 2 != 0]
self.create_on_off()
def create_selector_bcg(self, bcg_sd, bcg_time):
"""
select starts and ends of ablation based on selected element or sum of all using treshold
calculated from background
"""
bcg_nr = self.time_to_number(bcg_time)
bcg = self.filter_line.iloc[0:bcg_nr].mean()
std = self.filter_line.iloc[0:bcg_nr].std()
ind = [True if value > bcg+bcg_sd *
std else False for value in self.filter_line]
ind2 = ind[1:]
ind2.append(False)
index = [i for i in range(0, len(ind)) if ind[i] != ind2[i]]
self.starts = [index[i] for i in range(len(index)) if i % 2 == 0]
self.ends = [index[i] for i in range(len(index)) if i % 2 != 0]
self.create_on_off()
def create_selector_gradient(self, time_of_cycle=100):
"""
selects starts and ends of ablation based on selected element or sum of all using gradient
param time_of_cycle: time of the ablation and half of the pause between ablations in seconds
"""
n = self.time_to_number(
time_of_cycle) # number of values for one spot and half bcg
self.ends = list(argrelextrema(np.gradient(
self.filter_line.values), np.greater_equal, order=n)[0])
self.starts = list(argrelextrema(np.gradient(
self.filter_line.values), np.less_equal, order=n)[0])
print(self.starts)
print(self.ends)
self.create_on_off()
def create_on_off(self):
"""
from starts and ends of ablation create laser_on and laser_off with skipped values
"""
self.laser_off = []
self.laser_on = []
self.laser_off.append(
(0+self.time_to_number(self.skip['bcg_start']), self.starts[0]-self.time_to_number(self.skip['bcg_end'])))
for i in range(len(self.starts)-1):
self.laser_off.append((self.ends[i]+self.time_to_number(
self.skip['bcg_start']), self.starts[i+1]-self.time_to_number(self.skip['bcg_end'])))
self.laser_on.append((self.starts[i]+self.time_to_number(
self.skip['sample_start']), self.ends[i]-self.time_to_number(self.skip['sample_end'])))
self.laser_off.append((self.ends[-1]+self.time_to_number(self.skip['bcg_start']), len(
self.time)-2-self.time_to_number(self.skip['bcg_end'])))
self.laser_on.append((self.starts[-1]+self.time_to_number(
self.skip['sample_start']), self.ends[-1]-self.time_to_number(self.skip['sample_end'])))
def graph(self, ax=None, logax=False, el=None):
"""
create matplotlib graph of intensity in time for ablation
highlights ablation part and background signal
"""
if ax == None:
fig, ax = plt.subplots()
ax.cla()
ax.clear()
# if element is defined, plot only one element, otherwise all
if el:
self.data.plot(ax=ax, y=el, kind='line', legend=False)
else:
self.data.plot(ax=ax, kind='line', legend=False)
if logax:
ax.set_yscale('log')
if self.starts and self.ends:
# create lines for start and end of each ablation
for i in range(0, len(self.starts)):
ax.axvline(x=self.time[self.starts[i]],
color='blue', linewidth=2)
for i in range(0, len(self.ends)):
ax.axvline(x=self.time[self.ends[i]],
color='blue', linewidth=2)
if self.laser_off:
# higlights bacground
for off in self.laser_off:
#print(self.time[off[0]], self.time[off[1]])
try:
ax.axvspan(
self.time[off[0]], self.time[off[1]], alpha=0.2, color='red')
except:
warnings.warn('something is wrong')
if self.laser_on:
# higlihts ablation
for on in self.laser_on:
ax.axvspan(self.time[on[0]], self.time[on[1]],
alpha=0.2, color='green')
plt.show()
def set_srm(self, srm):
# select reference material used for quantification
if isinstance(srm, list):
self.srm = self.srms.loc[srm, :]
return
if srm in self.srms.index:
self.srm = self.srms.loc[srm, :]
def background(self, elem, scale):
if scale == 'beginning':
line = list(self.data[elem])
self.bcg = line[:self.laser_off[0][1]]
print(self.bcg)
# self.bcg = sum(self.bcg)/len(self.bcg)
# print(self.bcg)
def setxy(self, dx, dy):
# set x and y distance for elemental map
self.dx = dx
self.dy = dy
def integrated_area(self, elem):
# calculate area of a spot for given element
# returns list of areas
areas = []
line = self.data[elem]
if not self.laser_on and not self.laser_off:
print('Warning')
return
for i in range(0, len(self.laser_on)):
on = self.laser_on[i]
off_before = self.laser_off[i]
off_after = self.laser_off[i+1]
sample_y = list(line)[on[0]:on[1]]
sample_x = list(line.index)[on[0]:on[1]]
bcg_y = list(line)[off_before[0]:off_before[1]] + \
list(line)[off_after[0]:off_after[1]]
bcg_x = list(line.index)[
off_before[0]:off_before[1]] + list(line.index)[off_after[0]:off_after[1]]
gradient, intercept, r_value, p_value, std_err = stats.linregress(
bcg_x, bcg_y)
new_y_sample = [gradient*x+intercept for x in sample_x]
areas.append(np.trapz(sample_y, sample_x) -
np.trapz(new_y_sample, sample_x))
return areas
def mean_intensity(self, elem, scale):
# calculate mean intensity of a spot for given element
# returns list of means
means = []
line = self.data[elem]
if not self.laser_on and not self.laser_off:
print('Warning')
return
if scale == 'beginning':
self.background(elem, scale)
bcg_y = self.bcg
for i in range(0, len(self.laser_on)):
on = self.laser_on[i]
off_before = self.laser_off[i]
off_after = self.laser_off[i+1]
sample_y = list(line)[on[0]:on[1]]
if scale == 'all':
bcg_y = list(line)[off_before[0]:off_before[1]] + \
list(line)[off_after[0]:off_after[1]]
means.append(np.mean(outlier_detection(sample_y)) -
np.mean(outlier_detection(bcg_y)))
return means
def average(self, method='area', scale='all'):
# calculate average signal for each spot with substracted background
# method: 'area' uses integration of the peak 'intensity' uses mean of intensities
self.average_peaks = pd.DataFrame(columns=list(self.elements))
for elem in self.elements:
if method == 'area':
self.average_peaks[elem] = (self.integrated_area(elem))
if method == 'intensity':
self.average_peaks[elem] = (self.mean_intensity(elem, scale))
if self.names:
try:
self.average_peaks.index = self.names
except ValueError as e:
warnings.warn('Unable to match peak names to data.')
print(e)
else:
self.names = ['peak_{}'.format(i) for i in range(
1, len(self.average_peaks.index)+1)]
self.average_peaks.index = self.names
def quantification(self):
# calculate quantification of intensities or areas using selected reference material
if not self.names or self.srm.empty or self.average_peaks.empty:
warnings.warn('Missing data.')
spots = self.average_peaks.iloc[[
i for i, val in enumerate(self.names) if val != self.srm.name]]
print(spots)
stdsig = self.average_peaks.iloc[[i for i, val in enumerate(
self.names) if val == self.srm.name]].mean(axis=0)
print(stdsig)
self.ratio = [float(self.srm[element_strip(el)]) /
float(stdsig[el]) for el in stdsig.index]
print(self.ratio)
self.quantified = spots.mul(self.ratio, axis='columns')
def detection_limit(self, method='area', scale='all'):
"""
calculate limit of detection for analysis
param: method = ['area','intensity'] use same method as for the average
param: scale = ['beginning', 'all']
"""
if scale == 'all':
bcg = pd.DataFrame(columns=self.data.columns)
for (s, e) in self.laser_off:
bcg = pd.concat([bcg, self.data.iloc[np.r_[s:e], :]])
elif scale == 'beginning':
bcg = self.data.iloc[self.laser_off[0][0]:self.laser_off[0][1]]
if method == 'area':
self.lod = (bcg.std()*self.ablation_time).mul(self.ratio)
elif method == 'intensity':
self.lod = (bcg.std()*3).mul(self.ratio)
self.lod.name = 'LoD'
def internal_standard_correction(self):
# calculates correction for each element given in internal standard correction from PARAM file
print(self.internal_std.columns)
self.corrected_IS = []
if self.internal_std.empty:
return
self.correction_elements = list(self.internal_std.columns)
print(self.correction_elements)
for el in self.correction_elements:
print(el)
if el in self.elements:
self.corrected_IS.append(correction(
self.quantified, el, self.internal_std))
def total_sum_correction(self, suma=1000000):
# calculates total sum correction using coefficients given in PARAM file
if not self.sum_koeficients:
warnings.warn('Missing coeficients for total sum correction.')
return
print(self.sum_koeficients)
self.corrected_SO = self.quantified.copy()
for key in self.sum_koeficients:
elem = element_formater(key, self.corrected_SO.columns)
if not elem:
continue
self.corrected_SO[elem] = self.corrected_SO[elem] / \
self.sum_koeficients[key] * 100
koef = suma/self.corrected_SO.sum(1)
self.corrected_SO = self.corrected_SO.mul(list(koef), axis='rows')
for key in self.sum_koeficients:
elem = element_formater(key, self.corrected_SO.columns)
if not elem:
continue
self.corrected_SO[elem] = self.corrected_SO[elem] * \
self.sum_koeficients[key] / 100
def report(self):
if self.corrected_SO is not None:
self.corrected_SO = self.corrected_SO.append(self.lod)
for column in self.corrected_SO:
self.corrected_SO[column] = [
round_me(value, self.lod, column) for value in self.corrected_SO[column]]
if self.corrected_IS is not None:
self.corrected_IS = [df.append(self.lod)
for df in self.corrected_IS]
for df in self.corrected_IS:
for column in df:
df[column] = [round_me(value, self.lod, column)
for value in df[column]]
if self.quantified is not None:
self.quantified = self.quantified.append(self.lod)
for column in self.quantified:
self.quantified[column] = [
round_me(value, self.lod, column) for value in self.quantified[column]]
def save(self, path, data=None):
if data is None and self.quantified is not None:
data = self.quantified
elif data is None and self.quantified is None:
warnings.warn('No data to save.')
writer = pd.ExcelWriter(path, engine='xlsxwriter')
if isinstance(data, list):
for item, e in zip(self.corrected_IS, self.correction_elements):
item.to_excel(writer, sheet_name='Normalised_{}'.format(e))
else:
data.to_excel(writer, sheet_name='report')
writer.save()
def matrix_from_time(self, elem, bcg):
# create elemental map from time resolved LA-ICP-MS data
if self.dx is None or self.dy is None:
print('Warning: Missing values dx or dy.')
return
line = self.data[elem]
d = {}
tmpy = 0
if bcg == 'beginning':
bcg_lst = list(line)[self.laser_off[0][0]:self.laser_off[0][1]]
print(bcg_lst)
for i in range(0, len(self.laser_on)):
on = self.laser_on[i]
off_before = self.laser_off[i]
off_after = self.laser_off[i + 1]
tmpy = tmpy + self.dy
if bcg == 'beginning':
print('using beginning')
elif bcg == 'all':
bcg_lst = list(line)[off_before[0]:off_before[1]] + \
list(line)[off_after[0]:off_after[1]]
print(bcg_lst)
else:
print('Warning: not a valid background method')
arr = np.array(line)[on[0]:on[1]] - np.mean(bcg_lst)
arr[arr < 0] = 0
d[tmpy] = arr
df = pd.DataFrame.from_dict(d, orient='index')
tmpx = range(self.dx, self.dx * len(df.columns) + self.dx, self.dx)
df.columns = tmpx
return df
def create_all_maps(self, bcg=None):
for el in self.elements:
self.maps[el] = self.matrix_from_time(el, bcg)
def rotate_map(self, elem):
if elem in self.maps.keys():
rotated = np.rot90(self.maps[elem])
if rotated.shape[0] == len(self.maps[elem].index):
indexes = self.maps[elem].index
else:
indexes = self.maps[elem].columns
if rotated.shape[1] == len(self.maps[elem].columns):
columns = self.maps[elem].columns
else:
columns = self.maps[elem].index
self.maps[elem] = pd.DataFrame(
rotated, columns=columns, index=indexes)
else:
print('Warning: Matrix does not exists.')
def flip_map(self, elem, axis):
if elem in self.maps.keys():
flipped = np.flip(self.maps[elem].values, axis)
self.maps[elem] = pd.DataFrame(
flipped, columns=self.maps[elem].columns, index=self.maps[elem].index)
else:
print('Warning: Matrix does not exists.')
def elemental_image(self, elem, fig=None, ax=None, vmin=None, vmax=None, clb=True, axis=True,
colourmap='jet', interpolate='none', title='', units='', quantified=False, *args, **kwargs):
if fig is None or ax is None:
fig, ax = plt.subplots()
ax.cla()
if quantified is True:
if elem in self.qmaps.keys():
data = self.qmaps[elem]
else:
warnings.warn('Elemental map not quantified.')
data = self.qmaps[elem]
else:
if elem in self.maps.keys():
data = self.maps[elem]
else:
warnings.warn('Elemental map not generated.')
data = self.qmaps[elem]
im = ax.imshow(data, vmin=vmin, vmax=vmax, cmap=colourmap, interpolation=interpolate,
extent=[0, self.maps[elem].columns[-1], self.maps[elem].index[-1], 0], *args, **kwargs) # .values
if not axis:
ax.axis('off')
if clb:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
clb = fig.colorbar(im, cax=cax)
clb.ax.set_title(units)
fig.suptitle(title)
# plt.show()
def quantify_map(self, elem, intercept, slope):
if elem not in self.elements:
warnings.warn('Element map doesnt exist.')
return
self.qmaps[elem] = (self.maps[elem]-intercept)/slope
def quantify_all_maps(self):
for elem in self.elements:
self.quantify_map(
elem=elem, intercept=self.regression_equations[elem][0], slope=self.regression_equations[elem][1])
def export_matrices(self, path, quantified=False):
writer = pd.ExcelWriter(path, engine='xlsxwriter')
if quantified:
for el, mapa in self.qmaps.items():
mapa.to_excel(writer, sheet_name=el)
writer.save()
else:
for el, mapa in self.maps.items():
mapa.to_excel(writer, sheet_name=el)
writer.save()
def import_matrices(self, path):
file = | pd.ExcelFile(path) | pandas.ExcelFile |
"""
Extract sampled paramaters of selected traces and prepare simulation input files with fitted parameters
Outputs:
- 2 csvs with fitting paramerers for a) single best fit and b) n best fits
- 2 csv with samples parameters that can be used as input csv for subsequent simulation (for a and b as above)
- 1 emodl with fitting parameters renamed for each grp for next simulation
- 2 batch files to submit run scenarios for either a) or b) from above
"""
import argparse
import os
import pandas as pd
import numpy as np
import sys
import subprocess
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
from simulation_helpers import shell_header
from sample_parameters import make_identifier, gen_combos
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-s",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default = "Local"
)
parser.add_argument(
"--traces_to_keep_ratio",
type=int,
help="Ratio of traces to keep out of all trajectories",
default=10
)
parser.add_argument(
"--traces_to_keep_min",
type=int,
help="Minimum number of traces to keep, might overwrite traces_to_keep_ratio for small simulations",
default=5
)
parser.add_argument(
"--trace_to_run",
type=str,
choices=["ntraces", "besttrace",None],
help="Whether to run single best trace or n best traces as defined in traces_to_keep_ratio ",
default=None
)
return parser.parse_args()
def modify_emodl_and_save(exp_name,output_path):
"""Reads in emodl file and renames the parameters that had been identified in exact_sample_traces
with grp_suffix to have grp specific parameters.
Assumptions:
1 - each fitting parameters occurs once or twice the lengths as the defined groups (i.e. EMS-1 to 11)
2 - if parameters occur twice for each group, they do that in repeated order (i.e. EMS-1, EMS-1, EMS-2, EMS-2 ...)
3 - duplicated group names are not wanted and removed if accidentally added (i.e. EMS-1_EMS-1)
"""
grp_list = get_grp_list(exp_name)
grp_suffix = grp_list[-1].split('_')[0]
param_cols = pd.read_csv(os.path.join(output_path, f'fitted_parameters_besttrace.csv')).columns
param_cols = [i for i in param_cols if grp_suffix in i]
param_cols_unique = param_cols
for grp in reversed(grp_list):
param_cols_unique = [col.replace(f'_{grp}', '') for col in param_cols_unique]
param_cols_unique = list(set(param_cols_unique))
emodl_name = [file for file in os.listdir(output_path) if 'emodl' in file][0].replace('.emodl','')
emodl_name_new = f'{emodl_name}_resim'
fin = open(os.path.join(output_path, f'{emodl_name}.emodl'), "rt")
emodl_txt = fin.read()
fin.close()
emodl_chunks = emodl_txt.split('@')
sample_cols=[]
for col in param_cols_unique:
col_pos = []
for i, chunk in enumerate(emodl_chunks):
if col in chunk:
col_pos = col_pos + [i]
for i, pos in enumerate(col_pos):
#print(emodl_chunks[pos])
if len(col_pos) <len(grp_list):
sample_cols = sample_cols + [col]
if len(col_pos) == len(grp_list):
emodl_chunks[pos] = f'{emodl_chunks[pos]}_{grp_list[i]}'
if len(col_pos) == len(grp_list)*2:
"""assuming if occuring twice, its the same grp in two consecutive instances"""
grp_list_dup = [grp for grp in grp_list for i in range(2)]
emodl_chunks[pos] = f'{emodl_chunks[pos]}_{grp_list_dup[i]}'
#print(emodl_chunks[pos])
emodl_txt_new = '@'.join(emodl_chunks)
for grp in grp_list:
emodl_txt_new = emodl_txt_new.replace(f'{grp}_{grp}',f'{grp}')
fin = open(os.path.join(output_path, f'{emodl_name_new}.emodl'), "w")
fin.write(emodl_txt_new)
fin.close()
def write_submission_file(trace_selection,Location, r= 'IL',model='locale'):
"""Writes batch file that copies required input csvs and emodl to the corresponding location in git_dir
Assumptions:
Running location fixed to IL for spatial model (momentarily)
"""
emodl_name = [file for file in os.listdir(output_path) if 'emodl' in file][0].replace('.emodl','')
sample_csv = f'sample_parameters_{trace_selection}.csv'
input_csv_str = f' --sample_csv {sample_csv}'
model_str = f' --model {model}'
new_exp_name = f'{exp_name}_resim_{trace_selection}'
csv_from = os.path.join(output_path, sample_csv ).replace("/","\\")
csv_to = os.path.join(git_dir,"experiment_configs","input_csv").replace ("/","\\")
emodl_from = os.path.join(output_path,emodl_name+"_resim.emodl")
emodl_to = os.path.join(git_dir,"emodl",emodl_name+"_resim.emodl").replace("/","\\")
if Location =='Local':
file = open(os.path.join(output_path, 'bat', f'00_runScenarios_{trace_selection}.bat'), 'w')
file.write(
f'copy {csv_from} {csv_to}\n'
f'copy {emodl_from} {emodl_to}\n'
f'cd {git_dir} \n python runScenarios.py -r {r} '
f'-e {str(emodl_name)}_resim.emodl -n {new_exp_name} {model_str} {input_csv_str} \npause')
file.close()
if Location =='NUCLUSTER':
csv_from = csv_from.replace("\\","/")
csv_to = csv_to.replace("\\","/")
emodl_to = emodl_to.replace("\\","/")
jobname = 'runFittedParamSim'
header = shell_header(job_name=jobname)
commands = f'\ncp {csv_from} {csv_to}\n' \
f'\ncp {emodl_from} {emodl_to}\n' \
f'\ncd {git_dir} \n python runScenarios.py -r {r} ' \
f'-e {str(emodl_name)}_resim.emodl -n {new_exp_name} {model_str} {input_csv_str}'
file = open(os.path.join(output_path,'sh', f'00_runScenarios_{trace_selection}.sh'), 'w')
file.write(header + commands)
file.close()
file = open(os.path.join(output_path, f'submit_runScenarios_{trace_selection}.sh'), 'w')
file.write(
f'cd {os.path.join(output_path,"sh")}\n'
f'sbatch 00_runScenarios_{trace_selection}.sh\n')
file.close()
def extract_sample_traces(exp_name,traces_to_keep_ratio, traces_to_keep_min):
"""Identifies parameters that vary as fitting parameters and writes them out into csvs.
Combines fitting with sample parameters to simulate 'full' simulation.
Assumption:
Parameter that wish to no be grp specific were fixed
(could be aggregated before fitting, which needs to be edited together with trace_selection.py)
"""
df_samples = pd.read_csv(os.path.join(output_path, 'sampled_parameters.csv'))
"""Drop parameter columns that have equal values in all scenarios (rows) to assess fitted parameters"""
nunique = df_samples.apply(pd.Series.nunique)
cols_to_drop = nunique[nunique == 1].index
df_samples = df_samples.drop(cols_to_drop, axis=1)
grp_list = get_grp_list(exp_name)
df_traces = pd.DataFrame()
for grp in grp_list:
grp_nr = grp.split('_')[-1]
grp_suffix= grp.split('_')[0]
"""Drop parameters that correspond to other regions"""
grp_channels = [i for i in df_samples.columns if grp_suffix in i]
grp_cols_to_drop = [i for i in grp_channels if grp_nr != i.split('_')[-1]]
df_samples_sub = df_samples.drop(grp_cols_to_drop, axis=1)
rank_export_df = pd.read_csv(os.path.join(output_path, f'traces_ranked_region_{str(grp_nr)}.csv'))
n_traces_to_keep = int(len(rank_export_df) / traces_to_keep_ratio)
if n_traces_to_keep < traces_to_keep_min and len(rank_export_df) >= traces_to_keep_min:
n_traces_to_keep = traces_to_keep_min
if len(rank_export_df) < traces_to_keep_min:
n_traces_to_keep = len(rank_export_df)
df_samples_sub = pd.merge(how='left', left=rank_export_df[['scen_num','norm_rank']], left_on=['scen_num'], right=df_samples_sub, right_on=['scen_num'])
df_samples_sub = df_samples_sub.sort_values(by=['norm_rank']).reset_index(drop=True)
try:
df_samples_sub = df_samples_sub.drop(['scen_num', 'sample_num', 'norm_rank'], axis=1)
except:
df_samples_sub = df_samples_sub.drop(['scen_num', 'norm_rank'], axis=1)
df_samples_sub.columns = df_samples_sub.columns + f'_{str(grp)}'
df_samples_sub.columns = [col.replace( f'_{str(grp)}_{str(grp)}', f'_{str(grp)}') for col in df_samples_sub.columns ]
df_samples_sub['row_num'] = df_samples_sub.index
if df_traces.empty:
df_traces = df_samples_sub
else:
df_traces = | pd.merge(how='left', left=df_traces, left_on=['row_num'], right=df_samples_sub, right_on=['row_num']) | pandas.merge |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import json
import pandas as pd
import sys
import signal
import time
fname = sys.argv[1]
plt.ion()
fig = plt.figure()
def readStats():
f = open(fname, 'r')
m = json.load(f)
f.close()
plt.clf()
data = pd.DataFrame(m['heap']).get('v')
plt.subplot(211)
plt.title("Heap")
plt.plot(data)
plt.subplot(212)
plt.title("Block")
df = | pd.DataFrame(m['block']) | pandas.DataFrame |
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Reads a MultiLayer network (HS, MM & DM) and prints information.
#
#
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import networkx as nx
from utils import get_network_layer, get_network_by_attribute, ensurePathExists
from tabulate import tabulate
from itertools import combinations
def df2md(df, y_index=False, *args, **kwargs):
blob = tabulate(df, headers='keys', tablefmt='pipe', *args, **kwargs)
if not y_index:
return '\n'.join(['| {}'.format(row.split('|', 2)[-1]) for row in blob.split('\n')])
return blob
if __name__ == '__main__':
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
#
# Node/Edge stats on complete network
#
r = []
for celltype in celltypes:
print('Loading {celltype:s} network'.format(celltype=celltype))
rGfile_gpickle = '../../04-network/results/network/{celltype:s}/net-{celltype:s}-{network:s}.gpickle'.format(celltype=celltype, network='full')
G = nx.read_gpickle(rGfile_gpickle)
for layer in ['HS', 'MM', 'DM']:
print('Separate {layer:s} layer'.format(layer=layer))
Gt = get_network_layer(G, layer)
# Number of nodes/edges
n_nodes = Gt.number_of_nodes()
n_edges = Gt.number_of_edges()
r.append((celltype, layer, n_nodes, n_edges))
print('# Number of nodes/edges in each layer of the full network\n')
df_stat = pd.DataFrame(r, columns=['celltype', 'species', '#-nodes', '#-edges'])
print(df2md(df_stat, floatfmt='.4f'))
file = 'results/stats-full-network.csv'
ensurePathExists(file)
df_stat.to_csv(file)
#
# Node/Edge stats on threshold/conserved Network
#
network = 'conserved' # ['thr', 'conserved']
threshold = 0.5
threshold_str = str(threshold).replace('.', 'p')
#
if network == 'conserved':
celltypes = ['spermatocyte', 'enterocyte']
else:
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
#
r = []
for celltype in celltypes:
print('Reading {celltype:s}-{network:s}-{threshold:s} network'.format(celltype=celltype, network=network, threshold=threshold_str))
path_net = '../../04-network/results/network/{celltype:s}/'.format(celltype=celltype)
if network in ['thr', 'conserved']:
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.format(celltype=celltype, network=network, threshold=threshold_str)
elif network == 'full':
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}.gpickle'.format(celltype=celltype, network=network)
G = nx.read_gpickle(rGfile_gpickle)
for layer in ['HS', 'MM', 'DM']:
print('Separate {layer:s} layer'.format(layer=layer))
Gt = get_network_layer(G, layer)
# Number of nodes/edges
n_nodes = Gt.number_of_nodes()
n_edges = Gt.number_of_edges()
# Number of components (islands)
n_components = nx.number_connected_components(Gt)
# Largest Component
Gtlc = max(nx.connected_component_subgraphs(Gt), key=len)
n_nodes_largest_component = Gtlc.number_of_nodes()
n_edges_largest_component = Gtlc.number_of_edges()
for weight in ['combined_score', 'textmining', 'database', 'experiments', 'coexpression', 'cooccurence', 'fusion', 'neighborhood']:
edges = [(i, j) for i, j, d in Gt.edges(data=True) if weight in d]
Gtw = Gt.edge_subgraph(edges).copy()
# Number of nodes/edges
n_nodes = Gtw.number_of_nodes()
n_edges = Gtw.number_of_edges()
# Number of components (islands)
n_components = nx.number_connected_components(Gtw)
# Largest Component
if n_edges > 0:
Gtlc = max(nx.connected_component_subgraphs(Gtw), key=len)
n_nodes_largest_component = Gtlc.number_of_nodes()
n_edges_largest_component = Gtlc.number_of_edges()
else:
n_nodes_largest_component = 0
n_edges_largest_component = 0
r.append((celltype, layer, weight, n_nodes, n_edges, n_components, n_nodes_largest_component, n_edges_largest_component))
print('# Number of nodes/edges in the layer of the thresholded>0.5 network\n')
df_stat = | pd.DataFrame(r, columns=['celltype', 'species', 'edge-type', '#-nodes', '#-edges', '#-comps.', '#-nodes-in-lgt-comp.', '#-edges-lgt-comp.']) | pandas.DataFrame |
# Lint as: python3
"""Tests for main_heatmap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import metric_history
import numpy as np
import pandas as pd
SAMPLE_LOGS_LINK = 'https://console.cloud.google.com/logs?project=xl-ml-test&advancedFilter=resource.type%3Dk8s_container%0Aresource.labels.project_id%3Dxl-ml-test%0Aresource.labels.location=us-central1-b%0Aresource.labels.cluster_name=xl-ml-test%0Aresource.labels.namespace_name=automated%0Aresource.labels.pod_name:pt-1.5-cpp-ops-func-v2-8-1587398400&dateRangeUnbound=backwardInTime'
class MetricHistoryTest(parameterized.TestCase):
def test_make_plots_nothing_oob(self):
input_df = pd.DataFrame({
'test_name': pd.Series(['test1', 'test1', 'test1', 'test1']),
'metric_name': pd.Series(['acc', 'loss', 'acc', 'loss']),
'run_date': pd.Series(['2020-04-21', '2020-04-20', '2020-04-20',
'2020-04-21']),
'metric_value': pd.Series([99.1, 0.5, 99.2, 0.6]),
'metric_upper_bound': pd.Series([np.nan, 1.0, np.nan, 1.0]),
'metric_lower_bound': pd.Series([99.0, np.nan, 99.0, np.nan]),
'logs_link': pd.Series([SAMPLE_LOGS_LINK] * 4),
'job_status': pd.Series(['success', 'success', 'success', 'success']),
})
# There should be 2 plots: 1 per metric. Neither should be outlined in red
# since neither metric was oob.
plots = metric_history.make_plots('test1', '', input_df)
self.assertEqual(len(plots), 2)
self.assertItemsEqual([plot.title.text for plot in plots], ['loss', 'acc'])
self.assertNotEqual(plots[0].outline_line_color, 'red')
self.assertNotEqual(plots[1].outline_line_color, 'red')
def test_make_plots_with_oob(self):
input_df = pd.DataFrame({
'test_name': pd.Series(['test1', 'test1', 'test1', 'test1']),
'metric_name': pd.Series(['acc', 'loss', 'acc', 'loss']),
'run_date': pd.Series(['2020-04-21', '2020-04-20', '2020-04-20',
'2020-04-21']),
'metric_value': pd.Series([98.1, 0.5, 99.2, 0.6]),
'metric_upper_bound': pd.Series([np.nan, 1.0, np.nan, 1.0]),
'metric_lower_bound': pd.Series([99.0, np.nan, 99.0, np.nan]),
'logs_link': pd.Series([SAMPLE_LOGS_LINK] * 4),
'job_status': pd.Series(['success', 'success', 'success', 'success']),
})
# There should be 2 plots: 1 per metric.
plots = metric_history.make_plots('test1', '', input_df)
self.assertEqual(len(plots), 2)
# 'acc' should come first since it is oob. It should be outlined in red.
self.assertEqual([plot.title.text for plot in plots], ['acc', 'loss'])
self.assertEqual(plots[0].outline_line_color, 'red')
self.assertNotEqual(plots[1].outline_line_color, 'red')
def test_make_plots_with_oob_on_old_date(self):
input_df = pd.DataFrame({
'test_name': pd.Series(['test1', 'test1', 'test1', 'test1']),
'metric_name': pd.Series(['acc', 'loss', 'acc', 'loss']),
'run_date': pd.Series(['2020-04-21', '2020-04-20', '2020-04-20',
'2020-04-21']),
'metric_value': pd.Series([99.1, 0.5, 98.2, 0.6]),
'metric_upper_bound': pd.Series([np.nan, 1.0, np.nan, 1.0]),
'metric_lower_bound': pd.Series([99.0, np.nan, 99.0, np.nan]),
'logs_link': pd.Series([SAMPLE_LOGS_LINK] * 4),
'job_status': pd.Series(['success', 'success', 'success', 'success']),
})
# There should be 2 plots: 1 per metric.
plots = metric_history.make_plots('test1', '', input_df)
self.assertEqual(len(plots), 2)
# 'acc' was oob 2 runs ago but most recent run was OK, so it should not
# be given a red outline.
self.assertItemsEqual([plot.title.text for plot in plots], ['acc', 'loss'])
self.assertNotEqual(plots[0].outline_line_color, 'red')
self.assertNotEqual(plots[1].outline_line_color, 'red')
def test_make_plots_with_metric_substr(self):
input_df = pd.DataFrame({
'test_name': pd.Series(['test1', 'test1', 'test1', 'test1']),
'metric_name': pd.Series(['acc', 'loss', 'acc', 'loss']),
'run_date': pd.Series(['2020-04-21', '2020-04-20', '2020-04-20',
'2020-04-21']),
'metric_value': pd.Series([99.1, 0.5, 98.2, 0.6]),
'metric_upper_bound': | pd.Series([np.nan, 1.0, np.nan, 1.0]) | pandas.Series |
from autogluon.core.utils.feature_selection import *
from autogluon.core.utils.utils import unevaluated_fi_df_template
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import pytest
def evaluated_fi_df_template(features, importance=None, n=None):
rng = np.random.default_rng(0)
importance_df = pd.DataFrame({'name': features})
importance_df['importance'] = rng.standard_normal(len(features)) if importance is None else importance
importance_df['stddev'] = rng.standard_normal(len(features))
importance_df['p_value'] = None
importance_df['n'] = 5 if n is None else n
importance_df.set_index('name', inplace=True)
importance_df.index.name = None
return importance_df
@pytest.fixture
def sample_features():
return ['a', 'b', 'c', 'd', 'e']
@pytest.fixture
def sample_importance_df_1(sample_features):
return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1., None], n=[10, 5, 0, 5, 0])
@pytest.fixture
def sample_importance_df_2(sample_features):
return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0])
def test_add_noise_column_df():
# test noise columns are appended to input dataframe and feature_metadata
X = | pd.DataFrame({'a': [1, 2]}) | pandas.DataFrame |
import unittest
import pandas as pd
from pandas.core.indexes.range import RangeIndex
from pandas.testing import assert_frame_equal
import itertools
from datamatch.indices import MultiIndex, NoopIndex, ColumnsIndex
class BaseIndexTestCase(unittest.TestCase):
def assert_pairs_equal(self, pair_a, pair_b):
df1, df2 = pair_a
df3, df4 = pair_b
assert_frame_equal(df1, df3)
assert_frame_equal(df2, df4)
def assert_pairs_list_equal(self, list_a, list_b):
self.assertEqual(len(list_a), len(list_b))
for pair_a, pair_b in itertools.zip_longest(list_a, list_b):
self.assert_pairs_equal(pair_a, pair_b)
class TestNoopIndex(BaseIndexTestCase):
def test_index(self):
df = pd.DataFrame([[1, 2], [3, 4]])
idx = NoopIndex()
keys = idx.keys(df)
self.assertEqual(keys, set([0]))
assert_frame_equal(idx.bucket(df, 0), df)
class TestColumnsIndex(BaseIndexTestCase):
def test_index(self):
cols = ["c", "d"]
df = pd.DataFrame(
[[1, 2], [2, 4], [3, 4]], index=["x", "y", "z"], columns=cols)
idx = ColumnsIndex(["c"])
keys = idx.keys(df)
self.assertEqual(keys, set([(1,), (2,), (3,)]))
assert_frame_equal(
idx.bucket(df, (1,)),
pd.DataFrame([[1, 2]], index=["x"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (2,)),
pd.DataFrame([[2, 4]], index=["y"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (3,)),
pd.DataFrame([[3, 4]], index=["z"], columns=cols)
)
def test_multi_columns(self):
cols = ["c", "d"]
df = pd.DataFrame(
[[1, 2], [2, 4], [3, 4]], index=["z", "x", "c"], columns=cols)
idx = ColumnsIndex(["c", "d"])
keys = idx.keys(df)
self.assertEqual(keys, set([(1, 2), (2, 4), (3, 4)]))
assert_frame_equal(
idx.bucket(df, (1, 2)),
pd.DataFrame([[1, 2]], index=["z"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (2, 4)),
pd.DataFrame([[2, 4]], index=["x"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (3, 4)),
pd.DataFrame([[3, 4]], index=["c"], columns=cols)
)
def test_ignore_key_error(self):
df = pd.DataFrame(
[[1, 2], [3, 4]], columns=['a', 'b']
)
self.assertRaises(KeyError, lambda: ColumnsIndex('c').keys(df))
self.assertEqual(ColumnsIndex(
'c', ignore_key_error=True).keys(df), set())
def test_index_elements(self):
cols = ['col1', 'col2']
df = pd.DataFrame(
[
[['a', 'b'], 'q'],
[['c'], 'w'],
[['b'], 'e'],
],
index=RangeIndex(start=0, stop=3),
columns=cols
)
idx = ColumnsIndex('col1', index_elements=True)
keys = idx.keys(df)
self.assertEqual(keys, set([('a',), ('b',), ('c',)]))
assert_frame_equal(
idx.bucket(df, ('a',)),
pd.DataFrame([
[['a', 'b'], 'q']
], index=[0], columns=cols)
)
assert_frame_equal(
idx.bucket(df, ('b',)),
pd.DataFrame([
[['a', 'b'], 'q'],
[['b'], 'e'],
], index=[0, 2], columns=cols)
)
def test_index_elements_multi_columns(self):
cols = ['col1', 'col2', 'col3']
df = pd.DataFrame(
[
[['a', 'b'], 'q', [1]],
[['c'], 'w', [2, 3]],
[['b'], 'e', [1]],
],
index=RangeIndex(start=0, stop=3),
columns=cols
)
idx = ColumnsIndex(['col1', 'col3'], index_elements=True)
keys = idx.keys(df)
self.assertEqual(keys, set([
('c', 2), ('a', 1), ('b', 1), ('b', 1), ('c', 3)
]))
assert_frame_equal(
idx.bucket(df, ('a', 1)),
pd.DataFrame([
[['a', 'b'], 'q', [1]],
], index=[0], columns=cols)
)
assert_frame_equal(
idx.bucket(df, ('b', 1)),
pd.DataFrame([
[['a', 'b'], 'q', [1]],
[['b'], 'e', [1]],
], index=[0, 2], columns=cols)
)
class MultiIndexTestCase(BaseIndexTestCase):
def test_index(self):
cols = ["c", "d"]
df = pd.DataFrame(
[[1, 2], [2, 4], [3, 4]], index=["x", "y", "z"], columns=cols
)
idx = MultiIndex([
ColumnsIndex('c'),
ColumnsIndex('d')
])
keys = idx.keys(df)
self.assertEqual(keys, set([(1,), (2,), (3,), (4,)]))
assert_frame_equal(
idx.bucket(df, (1,)),
pd.DataFrame([[1, 2]], index=["x"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (2,)),
| pd.DataFrame([[1, 2], [2, 4]], index=["x", "y"], columns=cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
#AppAutomater.py has App graphs and data
#Graphs.py has all graphs
#Data.py has all data processing stuff
#Downloader.py is used to download files daily
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
#from apscheduler.schedulers.background import BackgroundScheduler
#import atexit
import plotly.express as px
import json
import numpy as np
import pandas as pd
from pymongo import MongoClient
#Scheduler to update data
###########################################################################
###########################################################################
def g():
#client =
#db =
#collection =
#Read Only Needed Data
###########################################################################
###########################################################################
grouped_daily_cities = collection.find_one({"index":"grouped_daily_cities"})
grouped_daily_cities = pd.DataFrame(grouped_daily_cities["data"])
grouped_cumulative_cities = collection.find_one({"index":"grouped_cumulative_cities"})
grouped_cumulative_cities = pd.DataFrame(grouped_cumulative_cities["data"])
g.grouped_daily_weekly = collection.find_one({"index":"grouped_daily_weekly"})
g.grouped_daily_weekly = pd.DataFrame(g.grouped_daily_weekly["data"])
df = collection.find_one({"index":"df"})
df = pd.DataFrame(df["data"])
# df=pd.read_csv('Data/df.csv')
df_Total = collection.find_one({"index":"df_Total"})
df_Total = pd.DataFrame(df_Total["data"])
# df_Total=pd.read_csv('Data/Total.csv')
g.grouped_daily_regions = collection.find_one({"index":"grouped_daily_regions"})
g.grouped_daily_regions = pd.DataFrame(g.grouped_daily_regions["data"])
# g.grouped_daily_regions=pd.read_csv('Data/grouped_daily_regions.csv')
grouped_cumulative_melt = collection.find_one({"index":"grouped_cumulative_melt"})
grouped_cumulative_melt = pd.DataFrame(grouped_cumulative_melt["data"])
# grouped_cumulative_melt=pd.read_csv('Data/grouped_cumulative_melt.csv')
grouped_cumulative = collection.find_one({"index":"grouped_cumulative"})
grouped_cumulative = pd.DataFrame(grouped_cumulative["data"])
# grouped_cumulative=pd.read_csv('Data/grouped_cumulative.csv')
grouped_daily = collection.find_one({"index":"grouped_daily"})
grouped_daily = pd.DataFrame(grouped_daily["data"])
# grouped_daily=pd.read_csv('Data/grouped_daily.csv')
grouped_daily_melt = collection.find_one({"index":"grouped_daily_melt"})
grouped_daily_melt = pd.DataFrame(grouped_daily_melt["data"])
# grouped_daily_melt=pd.read_csv('Data/grouped_daily_melt.csv')
grouped_daily_cities_weekly = collection.find_one({"index":"grouped_daily_cities_weekly"})
grouped_daily_cities_weekly = pd.DataFrame(grouped_daily_cities_weekly["data"])
# grouped_daily_cities_weekly=pd.read_csv('Data/grouped_daily_cities_weekly.csv')
grouped_daily_regions_weekly = collection.find_one({"index":"grouped_daily_regions_weekly"})
grouped_daily_regions_weekly = | pd.DataFrame(grouped_daily_regions_weekly["data"]) | pandas.DataFrame |
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import requests
import matplotlib.pyplot as plt
import mplfinance as mpf
import time
url = 'https://finance.naver.com/item/sise_day.nhn?code=068270&page=1'
# url = 'https://raw.githubusercontent.com/gangserver/pydev/main/net/dasom/python/stock/chap04/sample/naver_finance_068270.html'
hd = {'User-agent': 'Mozilla/5.0'}
def get_data(url):
return requests.get(url, headers=hd).text
# req = Request(url, headers=hd)
# with urlopen(req) as doc:
# html = BeautifulSoup(doc, 'lxml')
# pgrr = html.find('td', class_='pgRR')
# s = pgrr.a['href']
# last_page = s.split('=')[-1]
# print(last_page)
html = BeautifulSoup(get_data(url), 'lxml')
pgrr = html.find('td', class_='pgRR')
s = pgrr.a['href']
last_page = s.split('=')[-1]
print(last_page)
df = pd.DataFrame()
sise_url = 'https://finance.naver.com/item/sise_day.nhn?code=068270'
for page in range(1, int(last_page)+1):
print(page)
page_url = '{}&page={}'.format(sise_url, page)
df = df.append(pd.read_html(get_data(page_url))[0])
time.sleep(1)
df = df.dropna()
# print(df)
df.to_csv('data/sise_day_068270.csv')
# df = pd.read_csv('data/sise_day_068270.csv', usecols=['날짜', '종가', '전일비', '시가', '고가', '저가', '거래량'])
# print(df)
df = df.iloc[0:30]
df = df.sort_values(by='날짜')
plt.title('Celltrion (close)')
plt.plot(df['날짜'], df['종가'], 'co-')
plt.xticks(rotation='45')
plt.grid(color='gray', linestyle='--')
plt.show()
df = df.rename(columns={'날짜':'Date', '시가':'Open', '고가':'High', '저가':'Low', '종가':'Close', '거래량':'Volume'})
df = df.sort_values(by='Date')
df.index = pd.to_datetime(df.Date)
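# Plausible continuation (not present in the original snippet): the OHLCV column
# names and the DatetimeIndex set above are exactly the layout mplfinance
# expects, so the imported-but-unused `mpf` was presumably meant for a
# candlestick chart along these lines. The moving-average windows and style are
# arbitrary choices for the sketch.
mpf.plot(df, type='candle', volume=True, mav=(5, 20),
         title='Celltrion (candle)', style='charles')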
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
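# Illustration only (not part of the original test suite): a data file with the
# mixed line terminators and deliberately padded cells described above can be
# written with newline='' so that Python does not translate the terminators.
# The file name and contents below are made up for the example.
def _write_mixed_whitespace_example(path):
    with open(path, 'w', newline='', encoding='utf-8') as fh:
        fh.write('id\tcol1\r\n')    # Windows-style CRLF terminator
        fh.write('id1\t foo \n')    # Unix newline, cell padded with spaces
        fh.write('id2\tbar \r\n')   # trailing whitespace kept intact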
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
# This Metadata object is compared against observed Metadata objects in
# many of the tests, so just define it once here.
self.simple_md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Basic sanity check to make sure the columns are ordered and typed as
# expected. It'd be unfortunate to compare observed results to expected
# results that aren't representing what we think they are!
obs_columns = [(name, props.type)
for name, props in self.simple_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
# Simple metadata file without comments, empty rows, jaggedness,
# missing data, odd IDs or column names, directives, etc. The file has
# multiple column types (numeric, categorical, and something that has
# mixed numbers and strings, which must be interpreted as categorical).
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_bom_simple_txt(self):
# This is the encoding that notepad.exe will use most commonly
fp = get_data_path('valid/BOM-simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_different_file_extension(self):
fp = get_data_path('valid/simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_newline_at_eof(self):
fp = get_data_path('valid/no-newline-at-eof.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_unix_line_endings(self):
fp = get_data_path('valid/unix-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_windows_line_endings(self):
fp = get_data_path('valid/windows-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_mac_line_endings(self):
fp = get_data_path('valid/mac-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_source_artifacts(self):
fp = get_data_path('valid/simple.tsv')
metadata = Metadata.load(fp)
self.assertEqual(metadata.artifacts, ())
def test_retains_column_order(self):
# Explicitly test that the file's column order is retained in the
# Metadata object. Many of the test cases use files with column names
# in alphabetical order (e.g. "col1", "col2", "col3"), which matches
# how pandas orders columns in a DataFrame when supplied with a dict
# (many of the test cases use this feature of the DataFrame
# constructor when constructing the expected DataFrame).
fp = get_data_path('valid/column-order.tsv')
obs_md = Metadata.load(fp)
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_columns = ['z', 'y', 'x']
exp_data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_leading_trailing_whitespace(self):
fp = get_data_path('valid/leading-trailing-whitespace.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_comments(self):
fp = get_data_path('valid/comments.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_empty_rows(self):
fp = get_data_path('valid/empty-rows.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_qiime1_mapping_file(self):
fp = get_data_path('valid/qiime1.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_sample_information_file(self):
fp = get_data_path('valid/qiita-sample-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'DESCRIPTION': ['description 1', 'description 2'],
'TITLE': ['A Title', 'Another Title']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_preparation_information_file(self):
fp = get_data_path('valid/qiita-preparation-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'BARCODE': ['ACGT', 'TGCA'],
'EXPERIMENT_DESIGN_DESCRIPTION': ['longitudinal study',
'longitudinal study']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_biom_observation_metadata_file(self):
fp = get_data_path('valid/biom-observation-metadata.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['OTU_1', 'OTU_2'], name='#OTUID')
exp_df = pd.DataFrame([['k__Bacteria;p__Firmicutes', 0.890],
['k__Bacteria', 0.9999]],
columns=['taxonomy', 'confidence'],
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
fp = os.path.join(self.temp_dir, 'metadata.tsv')
count = 0
for header in headers:
with open(fp, 'w') as fh:
fh.write('%s\tcolumn\nid1\tfoo\nid2\tbar\n' % header)
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2'], name=header)
exp_df = pd.DataFrame({'column': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
fp = get_data_path('valid/recommended-ids.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
exp_df = pd.DataFrame({'col1': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_non_standard_characters(self):
# Test that non-standard characters in IDs, column names, and cells are
# handled correctly. The test case isn't exhaustive (e.g. it doesn't
# test every Unicode character; that would be a nice additional test
# case to have in the future). Instead, this test aims to be more of an
# integration test for the robustness of the reader to non-standard
# data. Many of the characters and their placement within the data file
# are based on use-cases/bugs reported on the forum, Slack, etc. The
# data file has comments explaining these test case choices in more
# detail.
fp = get_data_path('valid/non-standard-characters.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
exp_columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"',
'col\t \r\n5']
exp_data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
            ['baz', np.nan, '42', np.nan, np.nan]  # padded so every row has five cells
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_missing_data(self):
fp = get_data_path('valid/missing-data.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['None', 'nan', 'NA'], name='id')
exp_df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', 'NA']),
('col4', np.array([np.nan, np.nan, np.nan], dtype=object))]),
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
# Test that column types are correct (mainly for the two empty columns;
# one should be numeric, the other categorical).
obs_columns = [(name, props.type)
for name, props in obs_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('NA', 'numeric'),
('col3', 'categorical'), ('col4', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def test_minimal_file(self):
# Simplest possible metadata file consists of one ID and zero columns.
fp = get_data_path('valid/minimal.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_id(self):
fp = get_data_path('valid/single-id.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1'], name='id')
exp_df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_no_columns(self):
fp = get_data_path('valid/no-columns.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a', 'b', 'my-id'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_column(self):
fp = get_data_path('valid/single-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0]}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_trailing_columns(self):
fp = get_data_path('valid/trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_jagged_trailing_columns(self):
# Test case based on https://github.com/qiime2/qiime2/issues/335
fp = get_data_path('valid/jagged-trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_padding_rows_shorter_than_header(self):
fp = get_data_path('valid/rows-shorter-than-header.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, np.nan],
'col2': ['a', np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_all_cells_padded(self):
fp = get_data_path('valid/all-cells-padded.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
'col2': [np.nan, np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_does_not_cast_ids_or_column_names(self):
fp = get_data_path('valid/no-id-or-column-name-type-cast.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['0.000001', '0.004000', '0.000000'],
dtype=object, name='id')
exp_columns = ['42.0', '1000', '-4.2']
exp_data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': [0.0, 2.0, 0.0003, -4.2, 1e-4, 1e4,
1.5e2, np.nan, 1.0, 0.5, 1e-8, -0.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column_as_categorical(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': ['0', '2.0', '0.00030', '-4.2', '1e-4',
'1e4', '+1.5E+2', np.nan, '1.', '.5',
'1e-08', '-0']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_complete_types_directive(self):
fp = get_data_path('valid/complete-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_partial_types_directive(self):
fp = get_data_path('valid/partial-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_empty_types_directive(self):
fp = get_data_path('valid/empty-types-directive.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_with_case_insensitive_types_directive(self):
fp = get_data_path('valid/case-insensitive-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': [-5.0, 0.0, 42.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_without_directive(self):
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_override_directive(self):
fp = get_data_path('valid/simple-with-directive.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical',
'col2': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
class TestSave(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
self.filepath = os.path.join(self.temp_dir, 'metadata.tsv')
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tcol1\tcol2\tcol3\n"
"#q2:types\tnumeric\tcategorical\tcategorical\n"
"id1\t1\ta\tfoo\n"
"id2\t2\tb\tbar\n"
"id3\t3\tc\t42\n"
)
self.assertEqual(obs, exp)
def test_save_metadata_auto_extension(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Filename & extension endswith is matching (non-default).
fp = os.path.join(self.temp_dir, 'metadatatsv')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadatatsv.tsv')
# No period in filename; no extension included.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp)
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata')
# No period in filename; no period in extension.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp, 'tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# No period in filename; multiple periods in extension.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp, '..tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Single period in filename; no period in extension.
fp = os.path.join(self.temp_dir, 'metadata.')
obs_md = md.save(fp, 'tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Single period in filename; single period in extension.
fp = os.path.join(self.temp_dir, 'metadata.')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Single period in filename; multiple periods in extension.
fp = os.path.join(self.temp_dir, 'metadata.')
obs_md = md.save(fp, '..tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Multiple periods in filename; single period in extension.
fp = os.path.join(self.temp_dir, 'metadata..')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Multiple periods in filename; multiple periods in extension.
fp = os.path.join(self.temp_dir, 'metadata..')
obs_md = md.save(fp, '..tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# No extension in filename; no extension input.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp)
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata')
# No extension in filename; extension input.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Extension in filename; no extension input.
fp = os.path.join(self.temp_dir, 'metadata.tsv')
obs_md = md.save(fp)
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Extension in filename; extension input (non-matching).
fp = os.path.join(self.temp_dir, 'metadata.tsv')
obs_md = md.save(fp, '.txt')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv.txt')
# Extension in filename; extension input (matching).
fp = os.path.join(self.temp_dir, 'metadata.tsv')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
def test_no_bom(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'rb') as fh:
obs = fh.read(2)
self.assertEqual(obs, b'id')
def test_different_file_extension(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
filepath = os.path.join(self.temp_dir, 'metadata.txt')
md.save(filepath)
with open(filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tcol1\tcol2\tcol3\n"
"#q2:types\tnumeric\tcategorical\tcategorical\n"
"id1\t1\ta\tfoo\n"
"id2\t2\tb\tbar\n"
"id3\t3\tc\t42\n"
)
self.assertEqual(obs, exp)
def test_some_missing_data(self):
md = Metadata(
pd.DataFrame({'col1': [42.0, np.nan, -3.5],
'col2': ['a', np.nan, np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tcol1\tcol2\n"
"#q2:types\tnumeric\tcategorical\n"
"id1\t42\ta\n"
"id2\t\t\n"
"id3\t-3.5\t\n"
)
self.assertEqual(obs, exp)
def test_all_missing_data(self):
# nan-only columns that are numeric or categorical.
md = Metadata(
pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
'col2': np.array([np.nan, np.nan, np.nan],
dtype=object)},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tcol1\tcol2\n"
"#q2:types\tnumeric\tcategorical\n"
"id1\t\t\n"
"id2\t\t\n"
"id3\t\t\n"
)
self.assertEqual(obs, exp)
def test_unsorted_column_order(self):
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'b', 'y']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
md.save(self.filepath)
with open(self.filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tz\tb\ty\n"
"#q2:types\tnumeric\tcategorical\tcategorical\n"
"id1\t1\ta\tfoo\n"
"id2\t2\tb\tbar\n"
"id3\t3\tc\t42\n"
)
self.assertEqual(obs, exp)
def test_alternate_id_header(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
            index=pd.Index(['id1', 'id2', 'id3'], name='#SampleID')))