Dataset columns: prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])  # api: pandas.DatetimeIndex
import pandas as pd
import time
from collections import defaultdict
import re
import pickle
import argparse
import csv
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import pickle as pkl
import math
import itertools
import os
import scipy
import numpy as np
from datetime import datetime
import copy
from utils import str_to_datetime
def parse_args():
parser = argparse.ArgumentParser(description='process parameters')
parser.add_argument('--input_dispense', default=r'DELETE-ADD-LATER\DISPENSING.csv',
help='input data MCI_DISPENSING.csv directory')
parser.add_argument('--input_prescribe', default=r'DELETE-ADD-LATER\PRESCRIBING.csv',
help='input data MCI_PRESCRIBING.csv directory')
args = parser.parse_args()
return args
# %% Build NDC / RXNORM to ingredients mapping
def combine_two_dict(d1, d2):
"""
Combine two dicts that share the same semantics for keys and values.
No NaN is allowed as a key or value of either dict.
Consistency between the two dicts is checked.
"""
print('***combine_two_dict:')
k_v = {}
for d in [d1, d2]:
print('len(d): ', len(d))
for key, value in d.items():
# build ndc_ing
assert not pd.isnull(value)
assert not pd.isnull(key)
if key in k_v:
# check consistency
if value != k_v[key]:
print('inconsistency! Key: ', key, 'value != k_v[key]', value, k_v[key])
else:
k_v[key] = value
print('len(d1): ', len(d1), '+', 'len(d2): ', len(d2), '--> len(k_v): ', len(k_v))
return k_v
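# Illustrative example (not part of the original script): merging two small dicts with one
# shared, consistent key. Ignoring the diagnostic prints, the call
#     combine_two_dict({'a': '1', 'b': '2'}, {'b': '2', 'c': '3'})
# returns {'a': '1', 'b': '2', 'c': '3'}; a conflicting value for 'b' would only be reported
# by the inconsistency print, and the first value seen would be kept.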
def clean_str_list(a):
# a = a.replace('\'', '')
# a = a.replace('"', '')
# a = a.replace('[', '')
# a = a.replace(']', '')
# a = re.sub(r"\s+", "", a, flags=re.UNICODE)
a = re.sub(r"['\"\[\]\s+]", "", a, flags=re.UNICODE)
a = a.split(',')
a = [x for x in a if len(x) > 0]
return a
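# Illustrative self-check (not part of the original script): the regex strips quotes,
# brackets and whitespace before splitting on commas, so a stringified NDC list from
# RXNORM_Ingredient_mapping.csv becomes a plain Python list of codes.
assert clean_str_list("['00168004015', '00168004046']") == ['00168004015', '00168004046']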
def load_ndc_to_ingredient():
# read from 2 files and build ndc_to_ingredient mappings
# no nan as values of any dictionary
print('********load_ndc_to_ingredient*********')
df_map1 = pd.read_csv(r'mapping/NDC_RXNorm_mapping.csv', dtype=str) # (40157, 4)
df_map2 = pd.read_csv(r'mapping/RXNORM_Ingredient_mapping.csv', dtype=str) # (19171, 4)
df_map2['NDC'] = df_map2['NDC'].apply(clean_str_list)
ndc_ing_1 = {} # len: 26978
n_null_ing_1 = 0
for index, row in df_map1.iterrows():
# NDC_RXNorm_mapping.csv:
# NDC ingredient_code rxnrom
# 0 68462041438
# 1 11523716001 28889 206805
# 2 65862042001 10180 198335
ndc = row['NDC']
rxcui = row['rxnrom']
ing = row['ingredient_code']
# No nan value record.
if pd.isnull(ing):
n_null_ing_1 += 1
continue
if ndc in ndc_ing_1:
# check inconsistency:
# seems no duplicated ingredients
if ing != ndc_ing_1[ndc]:
print('inconsistency ing != ndc_ing_1[ndc]:', ing, ndc_ing_1[ndc])
else:
ndc_ing_1[ndc] = ing
ndc_ing_2 = {} # len:
n_null_ing_2 = 0
for index, row in df_map2.iterrows():
ndc = row['NDC']
rxcui = row['RXNORM_CUI']
ing = row['ingredient_code']
# No nan value record.
if pd.isnull(ing):
n_null_ing_2 += 1
continue
for x in ndc:
if x in ndc_ing_2:
# check inconsistency:
# seems no duplicated ingredients
if ing != ndc_ing_2[x]:
print('inconsistency ing != ndc_ing_2[x]:', ing, ndc_ing_2[x])
else:
ndc_ing_2[x] = ing
print("NDC_RXNorm_mapping.csv:\n",
'len(df_map1): ', len(df_map1),
'n_null_ing_1: ', n_null_ing_1,
'len(ndc_ing_1): ', len(ndc_ing_1))
print("RXNORM_Ingredient_mapping.csv:\n",
'len(df_map2): ', len(df_map2),
'n_null_ing_2: ', n_null_ing_2,
'len(ndc_ing_2): ', len(ndc_ing_2))
return ndc_ing_1, ndc_ing_2
def load_rxnorm_to_ingredient():
"""
Read from 2 files and build rxnorm_to_ingredient mappings
No nan as keys or values of any dictionary
:return: two dicts: rxnorm_ing_1, rxnorm_ing_2
"""
print('********load_rxnorm_to_ingredient*********')
df_map1 = pd.read_csv(r'mapping/NDC_RXNorm_mapping.csv', dtype=str) # (40157, 4)
df_map2 = pd.read_csv(r'mapping/RXNORM_Ingredient_mapping.csv', dtype=str) # (19171, 4)
df_map2['NDC'] = df_map2['NDC'].apply(clean_str_list)
rxnorm_ing_1 = {} # len: 26978
n_null_rxOring_1 = 0
for index, row in df_map1.iterrows():
# NDC_RXNorm_mapping.csv:
# NDC ingredient_code rxnrom
# 0 68462041438
# 1 11523716001 28889 206805
# 2 65862042001 10180 198335
ndc = row['NDC']
rxnorm = row['rxnrom']
ing = row['ingredient_code']
# No nan value record.
if pd.isnull(rxnorm) or pd.isnull(ing):
n_null_rxOring_1 += 1
continue
if rxnorm in rxnorm_ing_1:
# check inconsistency:
# seems no duplicated ingredients, but many duplicated rxnorm values, because different NDCs may have the same rxnorm
if ing != rxnorm_ing_1[rxnorm]:
print('inconsistency ing != rxnorm_ing_1[rxnrom]:', ing, rxnorm_ing_1[rxnorm])
else:
rxnorm_ing_1[rxnorm] = ing
rxnorm_ing_2 = {} # len:
n_null_ing_2 = 0
for index, row in df_map2.iterrows():
# RXNORM_Ingredient_mapping.csv
# RXNORM_CUI ingredient_code NDC
# 0 1092360 69036 ['62856058446']
# 1 197407 1514 ['00168004015', '00168004046', '00472037015', ...]
# 2 1741423 828529 ['67467062303', '68982062303']
ndc = row['NDC']
rxnorm = row['RXNORM_CUI']
ing = row['ingredient_code']
# No nan value record.
if pd.isnull(ing):  # api: pandas.isnull
""" Fred Model """
__docformat__ = "numpy"
import logging
from typing import Dict, List, Tuple
import fred
import pandas as pd
import requests
from fredapi import Fred
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def check_series_id(series_id: str) -> Tuple[bool, Dict]:
"""Checks if series ID exists in fred
Parameters
----------
series_id: str
Series ID to check
Returns
-------
bool:
Boolean if series ID exists
dict:
Dictionary of series information
"""
url = f"https://api.stlouisfed.org/fred/series?series_id={series_id}&api_key={cfg.API_FRED_KEY}&file_type=json"
r = requests.get(url, headers={"User-Agent": get_user_agent()})
# The above returns 200 if series is found
# There seems to be an occasional bug giving a 503 response where the json decoding fails
if r.status_code >= 500:
return False, {}
return r.status_code == 200, r.json()
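# Minimal usage sketch (illustrative, not part of the original module). It assumes a valid
# API_FRED_KEY is configured and uses "GDP" purely as an example series ID; the "seriess"
# field name follows the FRED JSON response format referenced elsewhere in this file.
#     exists, payload = check_series_id("GDP")
#     if exists:
#         print(payload["seriess"][0]["title"])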
@log_start_end(log=logger)
def get_series_notes(series_term: str) -> pd.DataFrame:
"""Get Series notes. [Source: FRED]
Parameters
----------
series_term : str
Search for this series term
Returns
----------
pd.DataFrame
DataFrame of matched series
"""
fred.key(cfg.API_FRED_KEY)
d_series = fred.search(series_term)
if "seriess" not in d_series:
return pd.DataFrame()
if not d_series["seriess"]:
return pd.DataFrame()  # api: pandas.DataFrame
import os
import mat73
import json
import numpy as np
import pandas as pd
import cv2
import math
def normalized2KITTI(box):
"""
convert Bbox format
:param box: [X, Y, width, height]
:return: [xmin, ymin, xmax, ymax]
"""
o_x, o_y, o_width, o_height = box
xmin = int(o_x)
ymin = int(o_y)
xmax = int(o_x + o_width)
ymax = int(o_y + o_height)
return [xmin, ymin, xmax, ymax]
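# Illustrative self-check (not part of the original script): a normalized box with
# X=10, Y=20, width=30, height=40 maps to the KITTI-style corners [xmin, ymin, xmax, ymax].
assert normalized2KITTI([10, 20, 30, 40]) == [10, 20, 40, 60]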
def getName(dSName, n):
"""getName returns the 'name' string for for the n(th) digitStruct. """
return dSName[n]
def getBbox_json(dSBbox, n, kitti=False):
"""getBbox returns a dict of data for the n(th) bbox. """
# print(n)
bboxs = []
elem = dSBbox[n]
# print(elem['height'])
if isinstance(elem['height'], list):
l = len(elem['height'])
else:
l = 1
for i in range(l):
try:
h, y, l, t, w = [float(max(k[i], 0)) for k in elem.values()]
except:
h, y, l, t, w = [float(max(k, 0)) for k in elem.values()]
if kitti:
xmin, ymin, xmax, ymax = normalized2KITTI([l, t, w, h])
bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
'label': y
}
else:
bbox = {
'height': h,
'width': w,
'top': t,
'left': l,
'label': y
}
bboxs.append(bbox)
return bboxs
def getDigitStructure_json(dSBbox, dSName, n, format_type):
"""get annotation for given index in dictionary format"""
s = {}
s['boxes'] = getBbox_json(dSBbox, n, format_type)
s['name'] = getName(dSName, n)
return s
def ann_to_json(file_path, save_path, bbox_type='normalize'):
"""convert .mat annotation file into .json file
Args:
file_path (str): .mat file path Ex : relative path '../data/digitStruct.mat' or complete path 'C:/usr/local/data/digitStruct.mat'
save_path (str): .json file directory *Otherthan .json file complete folder directory must exists in the system Ex : '../data/train.json'
bbox_type (str, optional): two type bounding box declaration format whether 'normalize' or 'kitti'. Defaults to 'normalize'.
Returns:
None : just save the .json file in the given dir.
"""
data_dict = mat73.loadmat(file_path)
dSName = data_dict['digitStruct']['name']
dSBbox = data_dict['digitStruct']['bbox']
if bbox_type == 'kitti':
t = True
else:
t = False
json_data = [getDigitStructure_json(
dSBbox, dSName, i, t) for i in range(len(dSBbox))]
with open(save_path, 'w', encoding='utf-8') as pf:
json.dump(json_data, pf, ensure_ascii=True, indent=4)
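# Example invocation (illustrative; the paths below are placeholders, not files shipped with
# the original repository):
#     ann_to_json('../data/train/digitStruct.mat', '../data/train.json', bbox_type='kitti')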
def getBbox_csv(dSBbox, n, kitti=False):
"""getBbox returns a dict of data for the n(th) bbox. """
# print(n)
bboxs = []
elem = dSBbox[n]
# print(elem['height'])
if isinstance(elem['height'], list):
l = len(elem['height'])
else:
l = 1
for i in range(l):
# print(elem.values())
try:
h, y, l, t, w = [float(k[i]) for k in elem.values()]
except:
h, y, l, t, w = [float(k) for k in elem.values()]
if kitti:
xmin, ymin, xmax, ymax = normalized2KITTI([l, t, w, h])
bbox = [y, xmin, ymin, xmax, ymax]
else:
bbox = [y, l, t, w, h]
bboxs.append(bbox)
return bboxs
def getDigitStructure_csv(dSBbox, dSName, n, format_type):
"""get annotation for given index in list of lists format"""
s = getBbox_csv(dSBbox, n, format_type)
filen = getName(dSName, n)
for lis in s:
lis.insert(0, filen)
return s
def ann_to_csv(file_path, save_path, bbox_type='normalize'):
"""convert .mat annotation file into .csv file
Args:
file_path (str): .mat file path Ex : relative path '../data/digitStruct.mat' or complete path 'C:/usr/local/data/digitStruct.mat'
save_path (str): .json file directory *Otherthan .json file complete folder directory must exists in the system Ex : '../data/train.json'
bbox_type (str, optional): two type bounding box declaration format whether 'normalize' or 'kitti'. Defaults to 'normalize'.
Returns:
None : just save the .csv file in the given dir.
"""
data_dict = mat73.loadmat(file_path)
dSName = data_dict['digitStruct']['name']
dSBbox = data_dict['digitStruct']['bbox']
if bbox_type == 'kitti':
t = True
else:
t = False
data_arr = []
for i in range(len(dSBbox)):
data_arr.extend(getDigitStructure_csv(dSBbox, dSName, i, t))
numpy_data = np.array(data_arr)
if t:
cols = ['filename', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
else:
cols = ['filename', 'class', 'left', 'top', 'width', 'height']
df = pd.DataFrame(data=numpy_data, columns=cols)  # api: pandas.DataFrame
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
This fixture does not include methods - 'time', 'index', 'nearest',
'values' as a parameterization
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])  # api: pandas.Series
#!/usr/bin/env python3
import argparse
import datetime
import logging
import os
import shutil
import time
from subprocess import check_output
import coloredlogs
import netCDF4 as nc
import numpy as np
import pandas as pd
from inicheck.utilities import mk_lst, remove_chars
from spatialnc.topo import get_topo_stats
from spatialnc.utilities import copy_nc, mask_nc
from basin_setup import __version__
DEBUG = False
def parse_fname_date(fname):
"""
Attempts to parse the date from the filename using underscores. This
assumes there is a parseable date that can be found between underscores
and can be determined with only numeric characters. E.g. 20200414. An
example of this is in a file would be:
USCASJ20200414_SUPERsnow_depth_50p0m_agg.tif
Args:
fname: File name containing a completely numeric date string in it
Return:
dt: Datetime object if pareable string was found, otherwise none
"""
bname = os.path.basename(fname)
# Remove the extension
bname = bname.split('.')[0]
dt = None
if "_" in bname:
# Assum underscores act like spaces
bname = bname.split("_")
else:
bname = [bname]
# Attempt to parse a date in the filename one at a time
for w in bname:
# Grab only numbers and letters
dt_str = "".join([c for c in w if c.isnumeric()])
try:
# Successful datestring found, break out
if dt_str:
dt = pd.to_datetime(dt_str)  # api: pandas.to_datetime
# python 3.7
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# coding: utf-8
"""
1. Use the most recent publications of researchers as input to generate user profiles.
2. The pretrained word2vec model window_5.model.bin and candidate_paper.csv are available via a Google Drive link;
you can download the files and
change the paths in this script so that the script runs successfully.
3. Results are saved in rank_result_rm/rank_result_mr_own_corpus.csv
"""
import sys
from gensim.models.keyedvectors import KeyedVectors
import numpy as np
import pandas as pd
from datetime import datetime
# load pre-train model on my own corpus
model = '/Users/sherry/Downloads/window_5/window_5.model.bin'
w2v_model = KeyedVectors.load_word2vec_format(model, binary=True)
# read all candidate papers info, contain two columns: paper ID and paper content
candidate_paper_df = pd.read_csv('/Users/sherry/Downloads/candidate_papers.csv')
# define DocSim class to calculate document similarities
class DocSim(object):
def __init__(self, w2v_model , stopwords=[]):
self.w2v_model = w2v_model
self.stopwords = stopwords
def vectorize(self, doc):
"""Identify the vector values for each word in the given document"""
doc = str(doc)
doc = doc.lower()
words = [w for w in doc.split(" ") if w not in self.stopwords]
word_vecs = []
for word in words:
try:
vec = self.w2v_model[word]
word_vecs.append(vec)
except KeyError:
# Ignore, if the word doesn't exist in the vocabulary
pass
# Assuming that document vector is the mean of all the word vectors
vector = np.mean(word_vecs, axis=0)
return vector
def _cosine_sim(self, vecA, vecB):
"""Find the cosine similarity distance between two vectors."""
csim = np.dot(vecA, vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))
if np.isnan(np.sum(csim)):
return 0
return csim
def calculate_similarity(self,user_profile,candidate_papers,threshold=0):
# Computing similarity between a given source document in user profile
# and all target documents in candidate papers
# candidate_papers is dataframe, user_profile is a one-line string
# rename columns in user_profile and candidate_papers
candidate_papers.columns = ['paperID', 'paperText']
# convert dataframe to dict
candidate_paper_dict = candidate_papers.set_index('paperID').to_dict()
# for each user profile doc as source doc, calculate similarity with each
# target doc
source_doc = str(user_profile)
source_vec = self.vectorize(source_doc)
result = []
i = 1
for paperID,paperText in candidate_paper_dict['paperText'].items():
target_doc = str(paperText)
target_vec = self.vectorize(target_doc)
sim_score = self._cosine_sim(source_vec, target_vec)
if sim_score > threshold:
result.append([paperID,sim_score])
# Sort results by similar scores in desc order
result.sort(key=lambda k : k[1] , reverse=True)
return result
def compute_sim_all_pubs(self, user_profile, candidate_papers,threshold=0):
"""
Computing similarity between several given source documents in user profile (with equal weight) and all target
documents in candidate
papers
:param user_profile: a list, all source docs of a researcher that used to construct one user profile
:param candidate_papers: a dataframe, all target docs that used as candidate recommend doc
:param threshold: filter recommend items according to threshold
:return: Sort rank results by similar scores in desc order
"""
# rename columns in user_profile and candidate_papers
candidate_papers.columns = ['paperID', 'paperText']
# convert dataframe to dict
candidate_paper_dict = candidate_papers.set_index('paperID').to_dict()
# for each user, source_doc_ls contains all his/her publications
source_docs_vec_ls = []
for pubished_seq,source_doc in enumerate(user_profile):
source_doc_vec = self.vectorize(source_doc)
# add each source doc vector into list source_docs_vec_ls
source_docs_vec_ls.append(source_doc_vec)
# compute user profile vector for each researcher based on all their publications with equal weight
user_profile_vec = np.sum(source_docs_vec_ls,axis = 0)/len(source_docs_vec_ls)
rank_result = []
i = 1
for paperID,paperText in candidate_paper_dict['paperText'].items():
target_doc = str(paperText)
target_vec = self.vectorize(target_doc)
sim_score = self._cosine_sim(user_profile_vec, target_vec)
if sim_score > threshold:
rank_result.append([paperID,sim_score])
# Sort results by similar scores in desc order
rank_result.sort(key=lambda k : k[1] , reverse=True)
return rank_result
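# Illustrative sanity checks (not part of the original script): under the cosine measure
# used above, identical vectors score 1.0 and orthogonal vectors score 0.0. A single query
# document could likewise be ranked against the candidate set via calculate_similarity.
_demo_ds = DocSim(w2v_model)
assert _demo_ds._cosine_sim(np.array([1.0, 0.0]), np.array([1.0, 0.0])) == 1.0
assert _demo_ds._cosine_sim(np.array([1.0, 0.0]), np.array([0.0, 1.0])) == 0.0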
ds = DocSim(w2v_model)
# get the list of number of publications for each researcher
import pandas as pd
user_statistics_df = pd.read_csv('user_profiles/user_profiles_statistics.csv')
num_pubs_ls = user_statistics_df.iloc[:,1].tolist()
new_df = pd.DataFrame()
ranking = [1,2,3,4,5,6,7,8,9,10]
new_df.insert(0,'ranking',ranking)
# reverse all researchers publications
for i in range(1,51,1):
r = 'R' + str(i)
print(datetime.now())
user_profile = []
# read all cleaned publications of this researcher into a list
print('number of publication for researcher {} is {}'.format(r, num_pubs_ls[i - 1]))
for j in range(1,num_pubs_ls[i-1]+1):
with open('user_profiles/user_profile_after_text_cleaning/cleaned_R{}-{}.txt'.format(i,j), 'r') as f:
each_doc = f.read() # each_doc is a string
# all source docs of a researcher that used to construct his/her user profile
user_profile.append(each_doc)
print('the len of user_profile list for this researcher is: {}'.format(len(user_profile)))
# computing sim scores
sim_scores = ds.compute_sim_all_pubs(user_profile, candidate_paper_df)
df = | pd.DataFrame(sim_scores) | pandas.DataFrame |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
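# The fixtures above encode the expected expansion semantics exercised by the tests below:
# expandTime spreads each timestamp into its own column (data_1, data_2, ...),
# expandCategory spreads each category into its own column, 'pad' fills missing
# time/category combinations with NaN, 'remove' keeps only the shared entries, and
# 'ignore' leaves the data unexpanded (and raises when an expansion is requested).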
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = | pd.DataFrame(expect_collection_expandFull['pad']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype
from pandas.types.common import pandas_dtype, is_dtype_equal
import pandas.util.testing as tm
class TestPandasDtype(tm.TestCase):
def test_numpy_dtype(self):
for dtype in ['M8[ns]', 'm8[ns]', 'object', 'float64', 'int64']:
self.assertEqual(pandas_dtype(dtype), np.dtype(dtype))
def test_numpy_string_dtype(self):
# do not parse freq-like string as period dtype
self.assertEqual(pandas_dtype('U'), np.dtype('U'))
self.assertEqual(pandas_dtype('S'), np.dtype('S'))
def test_datetimetz_dtype(self):
for dtype in ['datetime64[ns, US/Eastern]',
'datetime64[ns, Asia/Tokyo]',
'datetime64[ns, UTC]']:
self.assertIs(pandas_dtype(dtype), DatetimeTZDtype(dtype))
self.assertEqual(pandas_dtype(dtype), DatetimeTZDtype(dtype))
self.assertEqual(pandas_dtype(dtype), dtype)
def test_categorical_dtype(self):
self.assertEqual(pandas_dtype('category'), CategoricalDtype())
def test_period_dtype(self):
for dtype in ['period[D]', 'period[3M]', 'period[U]',
'Period[D]', 'Period[3M]', 'Period[U]']:
self.assertIs(pandas_dtype(dtype), PeriodDtype(dtype))
self.assertEqual(pandas_dtype(dtype), PeriodDtype(dtype))
self.assertEqual(pandas_dtype(dtype), dtype)
dtypes = dict(datetime_tz= | pandas_dtype('datetime64[ns, US/Eastern]') | pandas.types.common.pandas_dtype |
import pandas as pd
import numpy as np
from sklearn import datasets, preprocessing, metrics, model_selection
from ..models.ccf import CanonicalCorrelationForestClassifier
def test_ccf():
data = datasets.load_breast_cancer()
X_train, X_valid, y_train, y_valid = model_selection.train_test_split(preprocessing.scale(data.data), data.target)
X_train = | pd.DataFrame(X_train, columns=data.feature_names) | pandas.DataFrame |
# http://github.com/timestocome
#
# Build Bayesian using daily BitCoin Closing Price
# Use today and tomorrow's data to see if it can predict
# next few days market movements
from collections import Counter
import pandas as pd
import numpy as np
# http://coindesk.com/price
data_file = 'BitCoin_Daily_Close.csv'
##########################################################################################################
# utility functions
#########################################################################################################
states = ['HG', 'MG', 'LG', 'S', 'LL', 'ML', 'HL']
# break daily changes into buckets
# I'm using 7 buckets: HG +10%+, MG +5%-10%, LG +1/2%-5%, S -1/2%-1/2%, LL -1/2%-5%, ML -5%-10%, HL -10%+
# the more buckets (complexity) you add, the more data you'll need to get good results
def categorize(gain):
if gain > 10.:
return 'HG'
elif gain > 5.:
return 'MG'
elif gain > 0.5:
return 'LG'
elif gain > -0.5:
return 'S'
elif gain > -5.:
return 'LL'
elif gain > -10.:
return 'ML'
else:
return 'HL'
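# Worked examples: categorize(12) -> 'HG', categorize(3) -> 'LG',
# categorize(-0.2) -> 'S', categorize(-7) -> 'ML'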
# read in file from http://coindesk.com/price, save dates and log of daily closing price
# and daily change sorted into buckets
def read_data(file):
# read in data file
input = pd.read_csv(file, parse_dates=True, index_col=0)
# convert to log scale and calculate daily change ( volatility )
input['LogOpen'] = np.log(input['Close'])
input['change'] = input['LogOpen'] / input['LogOpen'].shift(1)
input = input.dropna(axis=0)
# group changes into buckets and label them
input['change'] = (input['change'] - 1.) * 1000.
input['dx'] = input['change'].apply(categorize)
# dump the columns we're done with
#print(input['dx'].describe())
return input['dx']
########################################################################################################
# build data structures
########################################################################################################
def probability_movement(input):
# calculate number of times each item appears and save
counts = input.value_counts().to_dict()
total = len(input)
print('Counts: ', counts)
# convert to probabilty of each change occurring s/b sum to ~1
probability = {k: v / total for k, v in counts.items()}
print('Probability:', probability)
return probability
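# Worked example: for the input sequence ['LG', 'LG', 'S'] the counts are
# {'LG': 2, 'S': 1} and the returned probabilities are {'LG': 2/3, 'S': 1/3}.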
# want probability of today, given tomorrow
def probability_today_given_tomorrow(input):
# create a node for every unique letter
uniques = set(input)
# get total shifts so we can use probabilities in predictions
total = len(input)
# we need the likelihood of today's state given what happens tomorrow,
# estimated from the historical sequence
print(input)
# create edges
edges = []
for i in range(len(uniques)):
n = list(uniques)[i]
for j in range(len(input)-1):
if input[j] == n:
edges.append( (n, input[j+1]) )
# count times each edge occurs
edge_count = | pd.Series(data=edges) | pandas.Series |
import os
import json
import random
import pandas as pd
import numpy as np
import experiments
import utils
import granularity
from granularity import SeqRange, VectorRange, TaggedString
from eval_functions import eval_f1, iou_score_multi, rmse
import merge_functions
def label2tvr(label, default=None):
return default if label is None else [SeqRange(l) for l in label]
# return default if label is None else [{"range":SeqRange(l), "tag":None} for l in label]
def convert2vr(annotations):
result = []
for annotation in annotations:
left = annotation["left"]
top = annotation["top"]
if annotation["width"] > 0 and annotation["height"] > 0:
result.append(VectorRange([left, top], [left + annotation["width"], top + annotation["height"]]))
else:
return np.nan
return result
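# convert2vr turns box annotations ({left, top, width, height}) into VectorRange corner
# pairs [top-left, bottom-right]; any box with non-positive width or height makes the
# whole item np.nan.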
class ExperimentResult(pd.DataFrame):
'''
Output experiment results together as a csv/dataframe with the following columns:
* Dataset name
* Evaluation function name
* Distance function name
* Aggregation method name
* Predicted annotations for all items
* Total average evaluation score
* Extra arguments
'''
def __init__(self, dataset_name, eval_fn_name, dist_fn_name, agg_method_name, preds, score, extra=None):
result = {
"Dataset name": [dataset_name],
"Eval Fn name": [eval_fn_name],
"Dist Fn name": [dist_fn_name],
"Agg method name": [agg_method_name],
"Predicted": [preds],
"Eval score": [score],
"Misc.": [extra]
}
super().__init__(result)
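# Illustrative only (argument values are made up): constructing a single result row
#   row = ExperimentResult("affect", "rmse", "inv_rmse", "mean", preds=[1.0, 2.0], score=0.42)
# yields a one-row DataFrame with the columns documented in the docstring above.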
class AffectExperiment(experiments.RealExperiment):
def __init__(self, **kwargs):
super().__init__(lambda x,y: 1 / rmse(x,y) , "annotation", "item", "uid", rmse)
self.data_dir = "data/snow_affect/"
self.merge_fn = merge_functions.numerical_mean
def setup(self):
emotions = ["surprise", "disgust", "sadness", "fear", "valence", "joy", "anger"]
def load_snow(relfilepath):
return pd.read_csv(relfilepath, sep="\t").set_index("!amt_annotation_ids")
dfs = [load_snow(self.data_dir + f + ".standardized.tsv") for f in emotions]
full_df = pd.concat(dfs, join="inner", axis=1)
full_df["annotation"] = full_df["response"].values.tolist()
full_df["groundtruth"] = full_df["gold"].values.tolist()
full_df["uid"] = full_df["!amt_worker_ids"].values[:,0]
full_df["item"] = full_df["orig_id"].values[:,0]
full_df = full_df[["item", "uid", "annotation", "groundtruth"]]
super().setup(full_df, full_df[["item", "groundtruth"]], c_gold_label="groundtruth")
class DecompositionExperiment(experiments.RealExperiment):
def __init__(self, eval_fn, label_colname, item_colname, uid_colname, distance_fn=None, **kwargs):
super().__init__(eval_fn, label_colname, item_colname, uid_colname, distance_fn, **kwargs)
self.gran_exp = experiments.RealExperiment(self.eval_fn, self.label_colname, "newItemID", self.uid_colname)
self.gran_exp_orc = experiments.RealExperiment(self.eval_fn, self.label_colname, "newItemID", self.uid_colname)
def setup(self, annodf, golddf, c_gold_item=None, c_gold_label=None):
super().setup(annodf=annodf, golddf=golddf, c_gold_item=c_gold_item, c_gold_label=c_gold_label)
granno_df = granularity.fragment_by_overlaps(self)
granno_df_orc = granularity.fragment_by_overlaps(self, use_oracle=True)
self.gran_exp.setup(granno_df, merge_index="origItemID")
self.gran_exp_orc.setup(granno_df_orc, merge_index="origItemID")
def register_weighted_merge(self):
self.gran_exp.merge_fn = self.merge_fn
self.gran_exp_orc.merge_fn = self.merge_fn
super().register_weighted_merge()
def train(self, dem_iter, mas_iter):
super().train(dem_iter=dem_iter, mas_iter=mas_iter)
self.gran_exp.train(dem_iter=dem_iter, mas_iter=mas_iter)
self.gran_exp_orc.train(dem_iter=dem_iter, mas_iter=mas_iter)
def test(self, debug):
super().test(debug=debug)
self.gran_exp.test_merged_granular(orig_golddict=self.golddict, debug=debug)
self.gran_exp_orc.test_merged_granular(orig_golddict=self.golddict, debug=debug)
gran_sb = {F"GRANULAR {k}": v for k, v in self.gran_exp.scoreboard.items()}
gran_orc_sb = {F"GRANULAR ORACLE {k}": v for k, v in self.gran_exp_orc.scoreboard.items()}
self.scoreboard = {**self.scoreboard, **gran_sb, **gran_orc_sb}
class PICOExperiment(DecompositionExperiment):
def __init__(self, **kwargs):
super().__init__(lambda x,y: eval_f1(x, y, strict_range=False, strict_tag=False, str_spans=False),
"label", "itemID", "uid")
self.rawdf = | pd.read_json("data/PICO/PICO-annos-crowdsourcing.json", lines=True) | pandas.read_json |
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):
parser = all_parsers
data = """2,0,1
1000,2000,3000
4000,5000,6000"""
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
def test_usecols_with_parse_dates(all_parsers, usecols):
# see gh-9755
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parser = all_parsers
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates2(all_parsers):
# see gh-13604
parser = all_parsers
data = """2008-02-07 09:40,1032.43
2008-02-07 09:50,1042.54
2008-02-07 10:00,1051.65"""
names = ["date", "values"]
usecols = names[:]
parse_dates = [0]
index = Index(
[
Timestamp("2008-02-07 09:40"),
Timestamp("2008-02-07 09:50"),
Timestamp("2008-02-07 10:00"),
],
name="date",
)
cols = {"values": [1032.43, 1042.54, 1051.65]}
expected = DataFrame(cols, index=index)
result = parser.read_csv(
StringIO(data),
parse_dates=parse_dates,
index_col=0,
usecols=usecols,
header=None,
names=names,
)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates3(all_parsers):
# see gh-14792
parser = all_parsers
data = """a,b,c,d,e,f,g,h,i,j
2016/09/21,1,1,2,3,4,5,6,7,8"""
usecols = list("abcdefghij")
parse_dates = [0]
cols = {
"a": Timestamp("2016-09-21"),
"b": [1],
"c": [1],
"d": [2],
"e": [3],
"f": [4],
"g": [5],
"h": [6],
"i": [7],
"j": [8],
}
expected = DataFrame(cols, columns=usecols)
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates4(all_parsers):
data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"
usecols = list("abcdefghij")
parse_dates = [[0, 1]]
parser = all_parsers
cols = {
"a_b": "2016/09/21 1",
"c": [1],
"d": [2],
"e": [3],
"f": [4],
"g": [5],
"h": [6],
"i": [7],
"j": [8],
}
expected = DataFrame(cols, columns=["a_b"] + list("cdefghij"))
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
@pytest.mark.parametrize(
"names",
[
list("abcde"), # Names span all columns in original data.
list("acd"), # Names span only the selected columns.
],
)
def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names):
# see gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
parser = all_parsers
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(
StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols
)
tm.assert_frame_equal(result, expected)
def test_usecols_with_unicode_strings(all_parsers):
# see gh-13219
data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"AAA": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"BBB": {0: 8, 1: 2, 2: 7},
}
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), usecols=["AAA", "BBB"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_single_byte_unicode_strings(all_parsers):
# see gh-13219
data = """A,B,C,D
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"A": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"B": {0: 8, 1: 2, 2: 7},
}
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), usecols=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [["AAA", b"BBB"], [b"AAA", "BBB"]])
def test_usecols_with_mixed_encoding_strings(all_parsers, usecols):
data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [["あああ", "いい"], ["あああ", "いい"]])
def test_usecols_with_multi_byte_characters(all_parsers, usecols):
data = """あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"あああ": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"いい": {0: 8, 1: 2, 2: 7},
}
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
def test_empty_usecols(all_parsers):
data = "a,b,c\n1,2,3\n4,5,6"
expected = DataFrame()
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=set())
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
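# These settings are passed as **kwargs to pd.testing.assert_frame_equal below,
# tolerating small floating point differences between bach and pandas results.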
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
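# compare_boundaries checks interval endpoints to two decimal places and verifies the
# closed sides; expected entries that are not pd.Interval must come back as None/NaN.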
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
pd.Interval(39.6, 49.5, closed='right'),
pd.Interval(49.5, 59.4, closed='right'),
pd.Interval(59.4, 69.3, closed='right'),
pd.Interval(69.3, 79.2, closed='right'),
pd.Interval(79.2, 89.1, closed='right'),
pd.Interval(89.1, 99, closed='right'),
]
expected = pd.Series({num: ranges[int(num / 10)] for num in range(100)})
result = CutOperation(series=series, bins=10, method='bach')().sort_index()
compare_boundaries(expected, result)
ranges_wo_right = [
pd.Interval(0, 9.9, closed='left'),
pd.Interval(9.9, 19.8, closed='left'),
pd.Interval(19.8, 29.7, closed='left'),
pd.Interval(29.7, 39.6, closed='left'),
pd.Interval(39.6, 49.5, closed='left'),
pd.Interval(49.5, 59.4, closed='left'),
pd.Interval(59.4, 69.3, closed='left'),
pd.Interval(69.3, 79.2, closed='left'),
pd.Interval(79.2, 89.1, closed='left'),
pd.Interval(89.1, 99, closed='both'),
]
expected_wo_right = pd.Series({num: ranges_wo_right[int(num / 10)] for num in range(100)})
result_wo_right = CutOperation(series=series, bins=10, method='bach', right=False)().sort_index()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_boundary(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 2, 3, 4], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=bins, right=True)
result = CutOperation(series=series, bins=bins, right=True)()
compare_boundaries(expected, result)
def test_cut_w_ignore_index(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 2, 3, 4], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
result = CutOperation(series=series, bins=bins, right=True, ignore_index=False)()
assert ['_index_0', 'a'] == list(result.index.keys())
result_w_ignore = CutOperation(series=series, bins=bins, right=True, ignore_index=True)()
assert ['a'] == list(result_w_ignore.index.keys())
def test_cut_w_include_empty_bins(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 1, 2, 3, 6, 7, 8], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
result = CutOperation(
series=series, bins=bins, include_empty_bins=True,
)().sort_index()
empty_interval = pd.Interval(3.333, 5.667)
expected_data = [
pd.Interval(0.993, 3.333),
pd.Interval(0.993, 3.333),
pd.Interval(0.993, 3.333),
pd.Interval(0.993, 3.333),
pd.Interval(5.667, 8),
pd.Interval(5.667, 8),
pd.Interval(5.667, 8),
empty_interval,
]
expected_index = [1., 1., 2., 3., 6., 7., 8., np.nan]
expected = pd.Series(data=expected_data, index=expected_index)
compare_boundaries(expected, result)
def test_cut_operation_calculate_bucket_properties(engine) -> None:
final_properties = ['a_min', 'a_max', 'bin_adjustment', 'step']
bins = 2
# min != max
p_series_neq = pd.Series(data=[1, 3, 5, 16, 2, 20], name='a')
series_neq = DataFrame.from_pandas(engine=engine, df=p_series_neq.to_frame(), convert_objects=True).a
result_neq = CutOperation(series=series_neq, bins=bins)._calculate_bucket_properties()
expected_neq = pd.DataFrame(
data={
'a_min': [1], # min(a) - min_adjustment
'a_max': [20], # max(a) + max_adjustment
'min_adjustment': [0], # min(a) != max(a)
'max_adjustment': [0], # min(a) != max(a)
'bin_adjustment': [0.019], # (max(a) - min(a)) * range_adjustment
'step': [9.5], # (max(a) - min(a)) / bins
},
)
pd.testing.assert_frame_equal(expected_neq[final_properties], result_neq.to_pandas(), check_dtype=False)
# min == max
p_series_eq = pd.Series(data=[2, 2], name='a')
series_eq = DataFrame.from_pandas(engine=engine, df=p_series_eq.to_frame(), convert_objects=True).a
result_eq = CutOperation(series=series_eq, bins=bins)._calculate_bucket_properties()
expected_eq = pd.DataFrame(
data={
'a_min': [1.998],
'a_max': [2.002],
'min_adjustment': [0.002], # if min(a) == max(a): range_adjustment * abs(min(a))
'max_adjustment': [0.002], # if min(a) == max(a): range_adjustment * abs(max(a))
'bin_adjustment': [0.],
'step': [0.002],
},
)
pd.testing.assert_frame_equal(expected_eq[final_properties], result_eq.to_pandas(), **PD_TESTING_SETTINGS)
# min == max == 0
p_series_zero = pd.Series(data=[0, 0, 0, 0], name='a')
series_zero = DataFrame.from_pandas(engine=engine, df=p_series_zero.to_frame(), convert_objects=True).a
result_zero = CutOperation(series=series_zero, bins=bins)._calculate_bucket_properties()
expected_zero = pd.DataFrame(
data={
'a_min': [-0.001],
'a_max': [0.001],
'min_adjustment': [0.001], # if min(a) == max(a) == 0: range_adjustment
'max_adjustment': [0.001], # if min(a) == max(a) == 0: range_adjustment
'bin_adjustment': [0.],
'step': [0.001],
},
)
pd.testing.assert_frame_equal(expected_zero[final_properties], result_zero.to_pandas(), **PD_TESTING_SETTINGS)
def test_cut_calculate_pandas_adjustments(engine) -> None:
pdf = pd.DataFrame(data={'min': [1], 'max': [100]})
df = DataFrame.from_pandas(engine=engine, df=pdf, convert_objects=True)
to_adjust = df['min']
to_compare = df['max']
result = CutOperation(series=df['min'], bins=1)._calculate_pandas_adjustments(to_adjust, to_compare)
assert isinstance(result, Series)
result_case_sql = result.expression.to_sql(df.engine.dialect)
max_identifier = quote_identifier(engine, 'max')
min_idenfifier = quote_identifier(engine, 'min')
expected_case_sql = (
f'case when {max_identifier} = {min_idenfifier} then\n'
f'case when {min_idenfifier} != 0 then 0.001 * abs({min_idenfifier}) else 0.001 end\n'
'else 0 end'
)
assert expected_case_sql == result_case_sql
def test_cut_calculate_bucket_ranges(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 1, 2, 3, 4, 5, 6, 7, 8], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
cut_operation = CutOperation(series=series, bins=bins)
bucket_properties_df = cut_operation._calculate_bucket_properties()
result = cut_operation._calculate_bucket_ranges(bucket_properties_df)
assert_equals_data(
result,
order_by=['lower_bound'],
expected_columns=['bucket', 'lower_bound', 'upper_bound', 'bounds'],
expected_data=[
[1, 0.993, 3.333, '(]'],
[2, 3.333, 5.667, '(]'],
[3, 5.667, 8, '(]'],
],
round_decimals=True,
decimal=3,
)
def test_qcut_operation(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected_w_list = pd.qcut(p_series, q=[0.25, 0.3, 0.7, 0.9])
result_w_list = QCutOperation(series=series, q=[0.25, 0.3, 0.7, 0.9])()
compare_boundaries(expected_w_list, result_w_list)
expected_q_num = pd.qcut(p_series, q=4)
result_q_num = QCutOperation(series=series, q=4)()
compare_boundaries(expected_q_num, result_q_num)
def test_qcut_operation_one_quantile(engine) -> None:
p_series = pd.Series(range(10), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.qcut(p_series, q=0)
result = QCutOperation(series=series, q=0)()
compare_boundaries(expected, result)
expected2 = | pd.qcut(p_series, q=[0.5]) | pandas.qcut |
# Import Libraries
import time
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Import Libraries
from scipy import stats
import matplotlib.pyplot as plt
# import time
# Import Libraries
import math
class YinsDL:
print("---------------------------------------------------------------------")
print(
"""
Yin's Deep Learning Package
Copyright © W.Y.N. Associates, LLC, 2009 – Present
For more information, please go to https://wyn-associates.com/
""" )
print("---------------------------------------------------------------------")
# Define function
def NN3_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN3_Classifier(X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
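# Applied row-wise below (axis=1), this standardizes each sample across its own
# features: zero mean and unit variance per row.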
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Model
model = tf.keras.Sequential([
keras.layers.Dense(units=layer1size, input_shape=[X_train.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train, y_train, epochs=num_of_epochs)
# Prediction
predictions = model.predict(X_test)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer3size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
# Define function
def NN10_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='relu', l4_act='relu', l5_act='relu',
l6_act='relu', l7_act='relu', l8_act='relu', l9_act='relu', l10_act='softmax',
layer1size=128, layer2size=64, layer3size=64, layer4size=64, layer5size=64,
layer6size=64, layer7size=64, layer8size=64, layer9size=64, layer10size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN10_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='relu', l4_act='relu', l5_act='relu',
l6_act='relu', l7_act='relu', l8_act='relu', l9_act='relu', l10_act='softmax',
layer1size=128, layer2size=64, layer3size=64, layer4size=64, layer5size=64,
layer6size=64, layer7size=64, layer8size=64, layer9size=64, layer10size=2,
plotROC=True,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Model
model = tf.keras.Sequential([
keras.layers.Dense(units=layer1size, input_shape=[X_train.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act),
keras.layers.Dense(units=layer4size, activation=l4_act),
keras.layers.Dense(units=layer5size, activation=l5_act),
keras.layers.Dense(units=layer6size, activation=l6_act),
keras.layers.Dense(units=layer7size, activation=l7_act),
keras.layers.Dense(units=layer8size, activation=l8_act),
keras.layers.Dense(units=layer9size, activation=l9_act),
keras.layers.Dense(units=layer10size, activation=l10_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train, y_train, epochs=num_of_epochs)
# Prediction
predictions = model.predict(X_test)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer10size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
# Define function
def plotOneImage(
initialPosX = 1,
initialPosY = 0,
boxWidth = 1,
boxHeight = 0,
linewidth = 2,
edgecolor = 'r',
IMAGE = 0):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
im = np.array(IMAGE, dtype=np.uint8)
# Create figure and axes
fig,ax = plt.subplots(1)
# Display the image
ax.imshow(im)
# Create a Rectangle patch
rect = patches.Rectangle(
(initialPosX, initialPosY), boxWidth, boxHeight,
linewidth=linewidth, edgecolor=edgecolor, facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.show()
# End of function
# Define function
def ConvOperationC1(
X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1 = [[1,0], [0,1]],
verbose=True, printManual=True):
if printManual:
print("----------------------------------------------------------------------")
print("Manual")
print(
"""
This function takes X_train, y_train, X_test, y_test with a selected input width and height
as well as a filter. It then performs a convolution operation to compute new
features from combinations of the original variables and the filter.
Note: the filter plays a crucial role, which is why it is exposed as a user-friendly
argument that can be updated as the user sees fit.
# Run
newDataGenerated = YinsDL.ConvOperationC1(
X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1 = [[1,0], [0,1]],
verbose=True, printManual=True)
""" )
print("----------------------------------------------------------------------")
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Convolutional Operation
X_train = np.reshape(np.array(X_train), (X_train.shape[0], inputSHAPEwidth, inputSHAPElenth))
X_test = np.reshape(np.array(X_test), (X_test.shape[0], inputSHAPEwidth, inputSHAPElenth))
if verbose:
print('Shapes of X in training set', X_train.shape, 'Shapes of X in test set:', X_test.shape)
# Filter
filter1 = pd.DataFrame(filter1)
# Convolutional Operation (called Yins to make it different from default function)
def YinsConvOp(incidence=0, X=X_train, unitFilter=filter1):
filterHeight = unitFilter.shape[0]
filterWidth = unitFilter.shape[1]
unitSample = []
for i in range(pd.DataFrame(X[incidence]).shape[0] - (filterHeight - 1)):
for j in range(pd.DataFrame(X[incidence]).shape[1] - (filterWidth - 1)):
unitSample.append(
np.multiply(
pd.DataFrame(X[incidence]).iloc[i:(i + filterWidth), j:(j + filterHeight)],
unitFilter).sum(axis=1).sum())
return unitSample
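# YinsConvOp is a 'valid' (no padding, stride 1) sliding window: each window of the
# incidence matrix is multiplied elementwise with the filter and summed.
# Worked example: for a 2x2 window [[1, 2], [3, 4]] and filter [[1, 0], [0, 1]],
# the output value is 1*1 + 2*0 + 3*0 + 4*1 = 5.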
# Apply Operation
X_train_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_train, unitFilter=filter1)])
for i in range(1, X_train.shape[0]):
X_train_new = pd.concat([
X_train_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_train, unitFilter=filter1)]) ])
# For Prediction
X_test_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_test, unitFilter=filter1)])
for i in range(1, X_test.shape[0]):
X_test_new = pd.concat([
X_test_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_test, unitFilter=filter1)]) ])
# Output
return {
'Data': [X_train, y_train, X_test, y_test, X_train_new, X_test_new],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)]
}
# End function
# Define function
def C1NN3_Classifier(
X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1 = [[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = C1NN3_Classifier(X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1=[[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Convolutional Operation
X_train = np.reshape(np.array(X_train), (X_train.shape[0], inputSHAPEwidth, inputSHAPElenth))
X_test = np.reshape(np.array(X_test), (X_test.shape[0], inputSHAPEwidth, inputSHAPElenth))
if verbose:
print('Shapes of X in training set', X_train.shape, 'Shapes of X in test set:', X_test.shape)
# Filter
filter1 = pd.DataFrame(filter1)
# Convolutional Operation (called Yins to make it different from default function)
def YinsConvOp(incidence=0, X=X_train, unitFilter=filter1):
filterHeight = unitFilter.shape[0]
filterWidth = unitFilter.shape[1]
unitSample = []
for i in range(pd.DataFrame(X[incidence]).shape[0] - (filterHeight - 1)):
for j in range(pd.DataFrame(X[incidence]).shape[1] - (filterWidth - 1)):
unitSample.append(
np.multiply(
pd.DataFrame(X[incidence]).iloc[i:(i + filterWidth), j:(j + filterHeight)],
unitFilter).sum(axis=1).sum())
return unitSample
# Apply Operation
X_train_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_train, unitFilter=filter1)])
for i in range(1, X_train.shape[0]):
X_train_new = pd.concat([
X_train_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_train, unitFilter=filter1)]) ])
# Model
model = tf.keras.Sequential([
keras.layers.Dense(units=layer1size, input_shape=[X_train_new.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train_new, y_train, epochs=num_of_epochs)
# Prediction
X_test_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_test, unitFilter=filter1)])
for i in range(1, X_test.shape[0]):
X_test_new = pd.concat([
X_test_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_test, unitFilter=filter1)]) ])
predictions = model.predict(X_test_new)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer3size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test, X_train_new, X_test_new],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
# Define function
def C2NN3_Classifier(
X_train, y_train, X_test, y_test,
inputSHAPEwidth1=10, inputSHAPElenth1=3,
inputSHAPEwidth2=8, inputSHAPElenth2=9,
filter1 = [[1,0], [0,1]],
filter2 = [[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True,
printManual=False):
if printManual:
print("--------------------------------------------------------------------")
print("MANUAL:")
print(
"""
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = C2NN3_Classifier(
X_train, y_train, X_test, y_test,
inputSHAPEwidth1=10, inputSHAPElenth1=3,
inputSHAPEwidth2=8, inputSHAPElenth2=9,
filter1 = [[1,0], [0,1]],
filter2 = [[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True,
printManual=True
""" )
print("--------------------------------------------------------------------")
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Convolutional Operation
X_train = np.reshape(np.array(X_train), (X_train.shape[0], inputSHAPEwidth1, inputSHAPElenth1))
X_test = np.reshape(np.array(X_test), (X_test.shape[0], inputSHAPEwidth1, inputSHAPElenth1))
if verbose:
print('Shapes of X in training set', X_train.shape, 'Shapes of X in test set:', X_test.shape)
# Filter
filter1 = pd.DataFrame(filter1)
filter2 = pd.DataFrame(filter2)
# Convolutional Operation (called Yins to make it different from default function)
def YinsConvOp(incidence=0, X=X_train, unitFilter=filter1):
filterHeight = unitFilter.shape[0]
filterWidth = unitFilter.shape[1]
unitSample = []
for i in range(pd.DataFrame(X[incidence]).shape[0] - (filterHeight - 1)):
for j in range(pd.DataFrame(X[incidence]).shape[1] - (filterWidth - 1)):
unitSample.append(
np.multiply(
                        pd.DataFrame(X[incidence]).iloc[i:(i + filterHeight), j:(j + filterWidth)],
unitFilter).sum(axis=1).sum())
return unitSample
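    # Note on YinsConvOp: for one sample X[incidence] of shape (H, W) and a filter of shape
    # (fh, fw), it slides the filter over every 'valid' position, sums the elementwise
    # products at each position, and returns the results as a flat list of length
    # (H - fh + 1) * (W - fw + 1), i.e. a valid-mode 2-D cross-correlation flattened into
    # one feature vector per sample.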
# Apply Operation
# Engineer the 1st convolutional layer
start = time.time()
X_train_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_train, unitFilter=filter1)])
for i in range(1, X_train.shape[0]):
X_train_new = pd.concat([
X_train_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_train, unitFilter=filter1)]) ])
end = time.time()
# Time Check
if verbose == True:
print('The 1st convolutional layer is done.')
print('Time Consumption (in sec):', round(end - start, 2))
print('Time Consumption (in min):', round((end - start)/60, 2))
print('Time Consumption (in hr):', round((end - start)/60/60, 2))
# Reshape
start = time.time()
X_train_new_copy = np.reshape(np.array(X_train_new), (X_train_new.shape[0], inputSHAPEwidth2, inputSHAPElenth2))
if verbose:
print("Shape of X in training set:", X_train_new_copy.shape)
# Engineer the 2nd convolutional layer
X_train_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_train_new_copy, unitFilter=filter2)])
for i in range(1, X_train_new_copy.shape[0]):
X_train_new = pd.concat([
X_train_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_train_new_copy, unitFilter=filter2)]) ])
end = time.time()
# Time Check
if verbose == True:
print("The 2nd convoluational layer is done. Shape of X in training set:", X_train_new_copy.shape)
print('Time Consumption (in sec):', round(end - start, 2))
print('Time Consumption (in min):', round((end - start)/60, 2))
print('Time Consumption (in hr):', round((end - start)/60/60, 2))
# Model
start = time.time()
model = tf.keras.Sequential([
        keras.layers.Dense(units=layer1size, activation=l1_act, input_shape=[X_train_new.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train_new, y_train, epochs=num_of_epochs)
end = time.time()
# Time Check
if verbose == True:
print('Training Completed.')
print('Time Consumption (in sec):', round(end - start, 2))
print('Time Consumption (in min):', round((end - start)/60, 2))
print('Time Consumption (in hr):', round((end - start)/60/60, 2))
# Prediction
# Engineer the 1st convolutional layer
X_test_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_test, unitFilter=filter1)])
for i in range(1, X_test.shape[0]):
X_test_new = pd.concat([
X_test_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_test, unitFilter=filter1)]) ])
# Reshape
X_test_new_copy = np.reshape(np.array(X_test_new), (X_test_new.shape[0], inputSHAPEwidth2, inputSHAPElenth2))
# Engineer the 2nd convolutional layer
X_test_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_test_new_copy, unitFilter=filter2)])
for i in range(1, X_test_new_copy.shape[0]):
X_test_new = pd.concat([
X_test_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_test_new_copy, unitFilter=filter2)]) ])
# Predict
predictions = model.predict(X_test_new)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer3size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
            plt.title('Receiver operating characteristic: '
                      'Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test, X_train_new, X_test_new],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
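# Note: a minimal sketch (not part of the original API; the helper name is hypothetical) of how
# to check that the reshape targets passed to C2NN3_Classifier are consistent with the length of
# the flattened feature vector produced by the convolutional operation above.
def _conv_output_length(height, width, filter_height, filter_width):
    """Length of the flattened vector returned by a valid-mode sliding of the filter."""
    return (height - filter_height + 1) * (width - filter_width + 1)
# Example: each training sample is reshaped to (inputSHAPEwidth1, inputSHAPElenth1) = (10, 3),
# so a 2x2 filter yields _conv_output_length(10, 3, 2, 2) == 18 features, and
# inputSHAPEwidth2 * inputSHAPElenth2 must equal 18 for the second reshape to succeed.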
# Define Function
def RNN4_Regressor(
start_date = '2013-01-01',
end_date = '2019-12-6',
tickers = 'AAPL', cutoff = 0.8,
l1_units = 50, l2_units = 50, l3_units = 50, l4_units = 50,
optimizer = 'adam', loss = 'mean_squared_error',
epochs = 50, batch_size = 64,
plotGraph = True,
verbatim = True
):
"""
MANUAL
# Load Package
%run "../scripts/YinsMM.py"
# Run
tmp = YinsDL.RNN4_Regressor(
start_date = '2013-01-01',
end_date = '2019-12-6',
tickers = 'AMD', cutoff = 0.8,
l1_units = 50, l2_units = 50, l3_units = 50, l4_units = 50,
optimizer = 'adam', loss = 'mean_squared_error',
epochs = 50, batch_size = 64,
plotGraph = True,
verbatim = True )
"""
# Initiate Environment
from scipy import stats
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
# Define function
def getDatafromYF(ticker, start_date, end_date):
stockData = yf.download(ticker, start_date, end_date)
return stockData
# End function
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
tickers = [tickers]
# Start with Dictionary (this is where data is saved)
stockData = {}
for i in tickers:
stockData[i] = pd.DataFrame(getDatafromYF(str(i), start_date, end_date))
close = stockData[i]['Adj Close']
stockData[i]['Normalize Return'] = close / close.shift() - 1
# Take a look
# print(stockData[tickers[0]].head(2)) # this is desired stock
# print(stockData[tickers[1]].head(2)) # this is benchmark (in this case, it is S&P 500 SPDR Index Fund: SPY)
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
stockData[tickers[0]].iloc[:, 4].head(3)
data = stockData[tickers[0]].iloc[:, 4:5].values
sc = MinMaxScaler(feature_range = (0, 1))
scaled_dta = sc.fit_transform(data)
scaled_dta = pd.DataFrame(scaled_dta)
training_set = scaled_dta.iloc[0:round(scaled_dta.shape[0] * cutoff), :]
testing_set = scaled_dta.iloc[round(cutoff * scaled_dta.shape[0] + 1):scaled_dta.shape[0], :]
# print(training_set.shape, testing_set.shape)
X_train = []
y_train = []
for i in range(100, training_set.shape[0]):
X_train.append(np.array(training_set)[i-100:i, 0])
y_train.append(np.array(training_set)[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
print(X_train.shape, y_train.shape)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
print(X_train.shape)
X_test = []
y_test = []
for i in range(100, testing_set.shape[0]):
X_test.append(np.array(testing_set)[i-100:i, 0])
y_test.append(np.array(testing_set)[i, 0])
X_test, y_test = np.array(X_test), np.array(y_test)
print(X_test.shape, y_test.shape)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print(X_test.shape)
### Build RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialize RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 1))
regressor.summary()
### Train RNN
# Compiling the RNN
regressor.compile(optimizer = optimizer, loss = loss)
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = epochs, batch_size = batch_size)
### Predictions
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
real_stock_price = np.reshape(y_test, (y_test.shape[0], 1))
real_stock_price = sc.inverse_transform(real_stock_price)
### Performance Visualization
# Visualising the results
import matplotlib.pyplot as plt
if plotGraph:
plt.plot(real_stock_price, color = 'red', label = f'Real {tickers[0]} Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = f'Predicted {tickers[0]} Stock Price')
plt.title(f'{tickers[0]} Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel(f'{tickers[0]} Stock Price')
plt.legend()
plt.show()
import math
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
if verbatim:
print(f'Root Mean Square Error is {round(rmse,2)} for test set.')
print(f'Interpretation: ---------------')
print(f'On the test set, the performance of this LSTM architecture guesses ')
print(f'{tickers[0]} stock price on average within the error of ${round(rmse,2)} dollars.')
# Output
return {
'Information': [training_set.shape, testing_set.shape],
'Data': [X_train, y_train, X_test, y_test],
'Test Response': [predicted_stock_price, real_stock_price],
'Test Error': rmse
}
# End function
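# Note: a hedged, self-contained sketch of the 100-step lookback windowing used inside
# RNN4_Regressor above; the helper below is hypothetical and not part of the original API.
def _make_lookback_windows(series, lookback=100):
    """Return (X, y): X[i] holds the `lookback` values preceding position i, y[i] the value at i."""
    import numpy as np
    values = np.asarray(series, dtype=float).reshape(-1)
    X = np.array([values[i - lookback:i] for i in range(lookback, len(values))])
    y = values[lookback:]
    # Keras LSTM layers expect input of shape (samples, timesteps, features).
    return X.reshape(X.shape[0], lookback, 1), y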
# define NeuralNet_Regressor function:
def NeuralNet_Regressor(
X_train=None,
y_train=None,
X_valid=None,
y_valid=None,
X_test=None,
y_test=None,
name_of_architecture="ANN",
input_shape=8,
use_auxinput=True,
num_of_res_style_block=None,
hidden=[128,64,32,10],
output_shape=1,
activation="relu",
last_activation="sigmoid",
learning_rate=0.001,
loss="mse",
name_of_optimizer="adam",
epochs=10,
plotModelSummary=True,
axis_font_size=20,
which_layer=None,
X_for_internal_extraction=None,
useGPU=False,
use_earlystopping=False,
do_plot=False,
verbose=True
):
# library
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import time
# define model
def build_model(input_shape=input_shape, use_auxinput=use_auxinput, num_of_res_style_block=num_of_res_style_block,
hidden=hidden, output_shape=output_shape, learning_rate=learning_rate,
loss="mse", activation=activation, last_activation=last_activation, name_of_optimizer=name_of_optimizer):
# model = tf.keras.models.Sequential(name=name_of_architecture)
inputs = keras.Input(shape=(input_shape,), name="input_layer")
if use_auxinput:
aux_input = inputs
# Set up the input layer or the 1st hidden layer
dense = layers.Dense(hidden[0], activation=activation, name=str('dense1'))
x = dense(inputs)
# What type of API are we using for hidden layer?
l = 2
for layer in hidden[1::]:
dense = layers.Dense(layer, activation=activation, name=str('dense'+str(l)))
x = dense(x)
l = l + 1
# Merge all available features into a single large vector via concatenation
if use_auxinput:
x = layers.concatenate([x, aux_input])
# Optional: design residual style block if num_of_res_style_block is an integer
# else continue
if num_of_res_style_block == None:
pass
else:
for res_i in range(num_of_res_style_block):
aux_input = x
for layer in hidden:
dense = layers.Dense(layer, activation=activation, name=str('dense'+str(l)))
x = dense(x)
l = l + 1
x = layers.concatenate([x, aux_input])
# Why do we set number of neurons (or units) to be 1 for this following layer?
outputs = layers.Dense(output_shape, name=str('dense'+str(l)))(x)
# A gentle reminder question: What is the difference between
# stochastic gradient descent and gradient descent?
if name_of_optimizer == "SGD" or name_of_optimizer == "sgd":
optimizer = tf.keras.optimizers.SGD(lr=learning_rate)
elif name_of_optimizer == "ADAM" or name_of_optimizer == "adam":
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
elif name_of_optimizer == "RMSprop" or name_of_optimizer == "rmsprop":
optimizer = tf.keras.optimizers.RMSprop(lr=learning_rate)
# Design a model
model = keras.Model(inputs=inputs, outputs=outputs, name=name_of_architecture)
# Another gentle reminder question: Why do we use mse or mean squared error?
model.compile(loss=loss, optimizer=optimizer)
return model
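    # Architecture summary (descriptive note): build_model() stacks the dense layers listed in
    # `hidden` as a feed-forward funnel; if use_auxinput is True the raw inputs are concatenated
    # back onto the last hidden activation (a wide-and-deep style shortcut), and if
    # num_of_res_style_block is an integer that dense-plus-concatenate pattern is repeated that
    # many times. The output layer has `output_shape` linear units, which suits regression.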
# create a KerasRegressor based on the model defined above
# print("Checkpoint")
# keras_reg_init = tf.keras.wrappers.scikit_learn.KerasRegressor(build_model)
keras_reg = build_model()
# plot model summary
if plotModelSummary:
import pydot
import graphviz
keras.utils.plot_model(keras_reg, name_of_architecture+".png", show_shapes=True)
print(keras_reg.summary())
    # comment:
    # The (commented-out) KerasRegressor object would be a thin wrapper around the Keras
    # model built using build_model(). Since we did not specify any hyperparameters when
    # creating it, it would use the default hyperparameters defined in build_model(), and
    # it could then be used just like a regular Scikit-learn regressor, i.e. with .fit(),
    # .predict(), and the other concepts discussed before. Here we simply call
    # build_model() and work with the plain Keras model directly, which exposes the same
    # .fit()/.predict() interface for our purposes.
# checkpoint
start = time.time()
# fit the model: determine whether to use GPU
if useGPU:
# %tensorflow_version 2.x
# import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found. If you are in Colab, please go to Edit => Notebook Setting to select GPU as Hardware Accelerator.')
print('Found GPU at: {}'.format(device_name))
# train
if verbose:
vb=1
else:
vb=0
print("Using GPU to compute...")
if use_earlystopping:
with tf.device('/device:GPU:0'):
history = keras_reg.fit(
X_train, y_train, epochs=epochs,
validation_data=(X_valid, y_valid),
callbacks=[tf.keras.callbacks.EarlyStopping(patience=10)],
verbose=vb)
else:
history = keras_reg.fit(
X_train, y_train, epochs=epochs,
validation_data=(X_valid, y_valid),
verbose=vb)
else:
# X_train, y_train, X_valid, y_valid, X_test, y_test
# print("Checkpoint")
if verbose:
vb=1
else:
vb=0
if use_earlystopping:
history = keras_reg.fit(
X_train, y_train, epochs=epochs,
validation_data=(X_valid, y_valid),
callbacks=[tf.keras.callbacks.EarlyStopping(patience=10)],
verbose=vb)
else:
history = keras_reg.fit(
X_train, y_train, epochs=epochs,
validation_data=(X_valid, y_valid),
verbose=vb)
# print("Checkpoint")
# checkpoint
end = time.time()
if verbose:
print('Training time consumption ' + str(end-start) + ' seconds.')
# prediction on train set
y_train_hat_ = keras_reg.predict(X_train)
# prediction on test set
y_test_hat_ = keras_reg.predict(X_test)
# library
import numpy as np
# from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / np.maximum(np.ones(len(y_true)), np.abs(y_true))))*100
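    # Note on the custom MAPE above: the denominator uses np.maximum(1, |y_true|) rather than
    # |y_true| itself, so targets close to zero cannot blow up the percentage error; for
    # |y_true| >= 1 it reduces to the usual mean absolute percentage error (in percent).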
# mean square error on train set
y_train_hat_ = y_train_hat_.reshape(-1)
y_train_hat_=pd.Series(y_train_hat_).fillna(0).tolist()
if output_shape == 1:
MAPE_train = mean_absolute_percentage_error(y_true=y_train, y_pred=y_train_hat_)
RMSE_train = mean_squared_error(y_true=y_train, y_pred=y_train_hat_) ** (.5)
else:
MAPE_train = "Output layer has shape more than 1."
RMSE_train = "Output layer has shape more than 1."
# mean square error on test set
y_test_hat_ = y_test_hat_.reshape(-1)
    y_test_hat_ = pd.Series(y_test_hat_).fillna(0).tolist()
'''
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Created on Apr 9, 2019
@author: <NAME> (<EMAIL>)
'''
import os
import glob
import argparse
import time
import pickle
import platform
import shutil
from random import shuffle
import json
import numpy as np
import pandas as pd
import cv2 as cv
from skimage.io import imread, imsave
from scipy.linalg import norm
import h5py
import matplotlib.pyplot as plt
import ipyparallel as ipp
from keras.models import Model, load_model
from keras.layers import Input, Dense, Lambda, ZeroPadding2D
from keras.layers import LeakyReLU, Flatten, Concatenate, Reshape, ReLU
from keras.layers import Conv2DTranspose, BatchNormalization
from keras.layers.merge import add, subtract
from keras.utils import multi_gpu_model
from keras.utils.data_utils import Sequence
import keras.backend as K
from keras import optimizers
from keras.engine.input_layer import InputLayer
from yolov3_detect import make_yolov3_model, BoundBox, WeightReader, draw_boxes_v3
from face_detection import FaceDetector
# Constants.
DEBUG = True
ALPHA = 0.2
RESOURCE_TYPE_UCCS = 'uccs'
RESOURCE_TYPE_VGGFACE2 = 'vggface2'
def triplet_loss(y_true, y_pred):
# Calculate the difference of both face features and judge a same person.
x = y_pred
return K.mean(K.maximum(K.sqrt(K.sum(K.pow(x[:, 0:64] - x[:, 64:128], 2.0), axis=-1)) \
- K.sqrt(K.sum(K.pow(x[:, 0:64] - x[:, 128:192], 2.0), axis=-1)) + ALPHA, 0.))
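# Note: triplet_loss above assumes y_pred packs the anchor, positive and negative embeddings
# side by side as columns [0:64], [64:128] and [128:192]. A hedged NumPy reference of the same
# computation, for illustration only:
def _triplet_loss_numpy(y_pred, alpha=ALPHA):
    import numpy as np
    a, p, n = y_pred[:, 0:64], y_pred[:, 64:128], y_pred[:, 128:192]
    d_ap = np.sqrt(np.sum((a - p) ** 2, axis=-1))  # anchor-positive distance
    d_an = np.sqrt(np.sum((a - n) ** 2, axis=-1))  # anchor-negative distance
    return np.mean(np.maximum(d_ap - d_an + alpha, 0.0))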
def create_db_fi(conf):
"""Create db for face identifier."""
conf = conf['fi_conf']
if conf['resource_type'] == RESOURCE_TYPE_UCCS:
raw_data_path = conf['raw_data_path']
nn_arch = conf['nn_arch']
if not os.path.isdir(os.path.join(raw_data_path, 'subject_faces')):
os.mkdir(os.path.join(raw_data_path, 'subject_faces'))
else:
shutil.rmtree(os.path.join(raw_data_path, 'subject_faces'))
os.mkdir(os.path.join(os.path.join(raw_data_path, 'subject_faces')))
gt_df = pd.read_csv(os.path.join(raw_data_path, 'training', 'training.csv'))
gt_df_g = gt_df.groupby('SUBJECT_ID')
# Collect face region images and create db, by subject ids.
db = pd.DataFrame(columns=['subject_id', 'face_file', 'w', 'h'])
for k in gt_df_g.groups.keys():
if k == -1: continue
df = gt_df_g.get_group(k)
for i in range(df.shape[0]):
file_name = df.iloc[i, 1]
# Load an image.
image = imread(os.path.join(raw_data_path, 'training', file_name))
# Check exception.
res = df.iloc[i, 3:] > 0
if res.all() == False:
continue
# Crop a face region.
l, t, r, b = (int(df.iloc[i, 3])
, int(df.iloc[i, 4])
, int((df.iloc[i, 3] + df.iloc[i, 5] - 1))
, int((df.iloc[i, 4] + df.iloc[i, 6] - 1)))
image = image[(t - 1):(b - 1), (l - 1):(r - 1), :]
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = nn_arch['image_size']
h_p = int(h / w * nn_arch['image_size'])
pad = nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = nn_arch['image_size']
w_p = int(w / h * nn_arch['image_size'])
pad = nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Write a face region image.
face_file_name = file_name[:-4] + '_' + str(k) + '_' \
+ str(int(df.iloc[i, 3])) + '_' + str(int(df.iloc[i, 4])) + file_name[-4:]
print('Save ' + face_file_name)
imsave(os.path.join(raw_data_path, 'subject_faces', face_file_name), (image).astype('uint8'))
# Add subject face information into db.
db = pd.concat([db, pd.DataFrame({'subject_id': [k]
, 'face_file': [face_file_name]
, 'w': [w]
, 'h': [h]})])
# Save db.
db.to_csv('subject_image_db.csv')
elif conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
raw_data_path = conf['raw_data_path']
nn_arch = conf['nn_arch']
# Collect face region images and create db, by subject ids.
pClient = ipp.Client()
pView = pClient[:]
pView.push({'raw_data_path': raw_data_path, 'nn_arch': nn_arch})
with pView.sync_imports():
import numpy as np
import pandas as pd
import cv2 as cv
from skimage.io import imread, imsave
if not os.path.isdir(os.path.join(raw_data_path, 'subject_faces_vggface2')):
os.mkdir(os.path.join(raw_data_path, 'subject_faces_vggface2'))
else:
shutil.rmtree(os.path.join(raw_data_path, 'subject_faces_vggface2'))
os.mkdir(os.path.join(os.path.join(raw_data_path, 'subject_faces_vggface2')))
df = pd.read_csv(os.path.join(raw_data_path, 'loose_bb_train.csv'))
db = pd.DataFrame(columns=['subject_id', 'face_file', 'w', 'h'])
dfs = [df.iloc[i] for i in range(df.shape[0])]
#dfs = [df.iloc[i] for i in range(100)]
res = pView.map_sync(save_extracted_face, dfs)
try:
res.remove(None)
except:
pass
db = pd.concat(res)
# Save db.
db.to_csv('subject_image_vggface2_db.csv')
else:
raise ValueError('resource type is not valid.')
def save_extracted_face(df):
global raw_data_path, nn_arch
import os
cv = cv2
pd = pandas
np = numpy
id_filename = df.iloc[0].split('/')
identity = id_filename[0]
file_name = id_filename[1] + '.jpg'
x = df.iloc[1]
y = df.iloc[2]
w = df.iloc[3]
h = df.iloc[4]
if x < 0 or y < 0 or w <=0 or h <=0:
return None
# Load an image.
image = imread(os.path.join(raw_data_path, 'train', identity, file_name))
# Get a face region.
image = image[y:(y + h), x:(x + w), :]
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = nn_arch['image_size']
h_p = int(h / w * nn_arch['image_size'])
pad = nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = nn_arch['image_size']
w_p = int(w / h * nn_arch['image_size'])
pad = nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Write a face region image.
face_file_name = identity + '_' + file_name
print('Save ' + face_file_name)
imsave(os.path.join(raw_data_path, 'subject_faces_vggface2', face_file_name), (image).astype('uint8'))
# Add subject face information into db.
return pd.DataFrame({'subject_id': [identity]
, 'face_file': [face_file_name]
, 'w': [w]
, 'h': [h]})
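# Note: the resize-and-pad logic duplicated in create_db_fi() and save_extracted_face() above
# letterboxes a face crop onto a square canvas of side nn_arch['image_size'] while preserving
# the aspect ratio. A hedged standalone sketch of the same idea (helper name is hypothetical):
def _letterbox_square(image, size):
    import cv2 as cv
    h, w = image.shape[:2]
    scale = size / max(h, w)
    h_p, w_p = int(h * scale), int(w * scale)
    resized = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
    pad_h, pad_w = size - h_p, size - w_p
    return cv.copyMakeBorder(resized,
                             pad_h // 2, pad_h - pad_h // 2,
                             pad_w // 2, pad_w - pad_w // 2,
                             cv.BORDER_CONSTANT, value=[0, 0, 0])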
class FaceIdentifier(object):
"""Face identifier to use yolov3."""
# Constants.
MODEL_PATH = 'face_identifier.h5'
def __init__(self, conf):
"""
Parameters
----------
conf: dictionary
Face detector configuration dictionary.
"""
# Initialize.
self.conf = conf['fi_conf']
self.raw_data_path = self.conf['raw_data_path']
self.hps = self.conf['hps']
self.nn_arch = self.conf['nn_arch']
self.model_loading = self.conf['model_loading']
if self.model_loading:
if self.conf['multi_gpu']:
self.model = load_model(self.MODEL_PATH, custom_objects={'triplet_loss': triplet_loss})
self.parallel_model = multi_gpu_model(self.model, gpus=self.conf['num_gpus'])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.parallel_model.compile(optimizer=opt, loss=triplet_loss)
else:
self.model = load_model(self.MODEL_PATH, custom_objects={'triplet_loss': triplet_loss})
else:
# Design the face identification model.
# Inputs.
input_a = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_a')
input_p = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_p')
input_n = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_n')
# Load yolov3 as the base model.
base = self.YOLOV3Base
base.name = 'base'
# Get triplet facial ids.
xa = base(input_a) # Non-linear.
xa = Flatten()(xa)
c_dense_layer = Dense(self.nn_arch['dense1_dim'], activation='relu', name='dense1')
l2_norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='l2_norm_layer')
xa = c_dense_layer(xa)
xa = l2_norm_layer(xa)
xp = base(input_p)
xp = Flatten()(xp)
xp = c_dense_layer(xp)
xp = l2_norm_layer(xp)
xn = base(input_n)
xn = Flatten()(xn)
xn = c_dense_layer(xn)
xn = l2_norm_layer(xn)
output = Concatenate(name='output')([xa, xp, xn]) #?
if self.conf['multi_gpu']:
self.model = Model(inputs=[input_a, input_p, input_n], outputs=[output])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.model.compile(optimizer=opt, loss=triplet_loss)
self.model.summary()
self.parallel_model = multi_gpu_model(Model(inputs=[input_a, input_p, input_n], outputs=[output])
, gpus=self.conf['num_gpus'])
self.parallel_model.compile(optimizer=opt, loss=triplet_loss)
self.parallel_model.summary()
else:
self.model = Model(inputs=[input_a, input_p, input_n], outputs=[output])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.model.compile(optimizer=opt, loss=triplet_loss)
self.model.summary()
# Create face detector.
self.fd = FaceDetector(conf['fd_conf'])
# Make fid extractor and face identifier.
self._make_fid_extractor()
def _make_fid_extractor(self):
"""Make facial id extractor."""
# Design the face identification model.
# Inputs.
input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')
# Load yolov3 as the base model.
base = self.model.get_layer('base')
# Get facial id.
x = base(input1) # Non-linear.
x = Flatten()(x)
x = self.model.get_layer('dense1')(x)
x = self.model.get_layer('l2_norm_layer')(x)
facial_id = x
self.fid_extractor = Model(inputs=[input1], outputs=[facial_id])
@property
def YOLOV3Base(self):
"""Get yolov3 as a base model.
Returns
-------
Model of Keras
Partial yolo3 model from the input layer to the add_23 layer
"""
if self.conf['yolov3_base_model_load']:
base = load_model('yolov3_base.h5')
base.trainable = True
return base
yolov3 = make_yolov3_model()
# Load the weights.
weight_reader = WeightReader('yolov3.weights')
weight_reader.load_weights(yolov3)
# Make a base model.
input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')
# 0 ~ 1.
conv_layer = yolov3.get_layer('conv_' + str(0))
x = ZeroPadding2D(1)(input1) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(0))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(1))
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 2 ~ 3.
for i in range(2, 4, 2):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
# 5.
conv_layer = yolov3.get_layer('conv_' + str(5))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(5))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 6 ~ 10.
for i in range(6, 10, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 12.
conv_layer = yolov3.get_layer('conv_' + str(12))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(12))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 13 ~ 35.
for i in range(13, 35, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 37.
conv_layer = yolov3.get_layer('conv_' + str(37))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(37))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 38 ~ 60.
for i in range(38, 60, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 62.
conv_layer = yolov3.get_layer('conv_' + str(62))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(62))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 63 ~ 73.
for i in range(63, 73, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
output = x
base = Model(inputs=[input1], outputs=[output])
base.trainable = True
base.save('yolov3_base.h5')
return base
def train(self):
"""Train face detector."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
trGen = self.TrainingSequence(self.raw_data_path, self.hps, self.nn_arch, load_flag=False)
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
trGen = self.TrainingSequenceVGGFace2(self.raw_data_path, self.hps, self.nn_arch, load_flag=False)
else:
raise ValueError('resource type is not valid.')
if self.conf['multi_gpu']:
self.parallel_model.fit_generator(trGen
, steps_per_epoch=self.hps['step'] #?
, epochs=self.hps['epochs']
, verbose=1
, max_queue_size=400
, workers=8
, use_multiprocessing=True)
else:
self.model.fit_generator(trGen
, steps_per_epoch=self.hps['step']
, epochs=self.hps['epochs']
, verbose=1
, max_queue_size=100
, workers=4
, use_multiprocessing=True)
print('Save the model.')
self.model.save(self.MODEL_PATH)
def make_facial_ids_db(self):
"""Make facial ids database."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
db = pd.read_csv('subject_image_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
with h5py.File('subject_facial_ids.h5', 'w') as f:
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
for k, ff in enumerate(list(df.iloc[:, 1])):
f[ff] = facial_ids[k]
f[ff].attrs['subject_id'] = subject_id
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
db = pd.read_csv('subject_image_vggface2_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
with h5py.File('subject_facial_vggface2_ids.h5', 'w') as f:
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces_vggface2', ff)) #?
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
for k, ff in enumerate(list(df.iloc[:, 1])):
f[ff] = facial_ids[k]
f[ff].attrs['subject_id'] = subject_id
else:
raise ValueError('resource type is not valid.')
def register_facial_ids(self):
"""Register facial ids."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
db = pd.read_csv('subject_image_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
db_facial_id = pd.DataFrame(columns=['subject_id', 'facial_id'])
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
                facial_id = np.asarray(pd.DataFrame(facial_ids).mean())
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
                # pandas.read_csv uses the first line in the file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
        self.sci_name = dr.loc[:, 'Scientific Name']
        self.com_name = dr.loc[:, 'Common Name']
        self.taxa = dr.loc[:, 'Taxa']
        self.order = dr.loc[:, 'Order']
        self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:, 'BW (g)']
        self.diet_item = dr.loc[:, 'Food item']
        self.h2o_cont = dr.loc[:, 'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = pd.Series([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort = pd.Series([], dtype="float", name="arbt_mamm_mort")
self.arbt_mamm_growth = pd.Series([], dtype="float", name="arbt_mamm_growth")
self.arbt_mamm_repro = pd.Series([], dtype="float", name="arbt_mamm_repro")
self.arbt_mamm_behav = pd.Series([], dtype="float", name="arbt_mamm_behav")
self.arbt_mamm_sensory = pd.Series([], dtype="float", name="arbt_mamm_sensory")
# application rate-based toxicity (arbt) : birds (lbs active ingredient/Acre)
self.arbt_bird_mort = pd.Series([], dtype="float", name="arbt_bird_mort")
self.arbt_bird_growth = pd.Series([], dtype="float", name="arbt_bird_growth")
self.arbt_bird_repro = pd.Series([], dtype="float", name="arbt_bird_repro")
self.arbt_bird_behav = pd.Series([], dtype="float", name="arbt_bird_behav")
self.arbt_bird_sensory = pd.Series([], dtype="float", name="arbt_bird_sensory")
# application rate-based toxicity (arbt) : reptiles (lbs active ingredient/Acre)
self.arbt_reptile_mort = pd.Series([], dtype="float", name="arbt_reptile_mort")
self.arbt_reptile_growth = pd.Series([], dtype="float", name="arbt_reptile_growth")
self.arbt_reptile_repro = pd.Series([], dtype="float", name="arbt_reptile_repro")
self.arbt_reptile_behav = pd.Series([], dtype="float", name="arbt_reptile_behav")
self.arbt_reptile_sensory = pd.Series([], dtype="float", name="arbt_reptile_sensory")
# application rate-based toxicity (arbt) : invertebrates (lbs active ingredient/Acre)
self.arbt_inv_1inmill_mort = pd.Series([], dtype="float", name="arbt_inv_1inmill_mort")
self.arbt_inv_1inten_mort = pd.Series([], dtype="float", name="arbt_inv_1inten_mort")
self.arbt_inv_sub_direct = pd.Series([], dtype="float", name="arbt_inv_sub_direct")
self.arbt_inv_sub_indirect = pd.Series([], dtype="float", name="arbt_inv_sub_indirect")
self.arbt_inv_growth = pd.Series([], dtype="float", name="arbt_inv_growth")
self.arbt_inv_repro = pd.Series([], dtype="float", name="arbt_inv_repro")
self.arbt_inv_behav = pd.Series([], dtype="float", name="arbt_inv_behav")
self.arbt_inv_sensory = pd.Series([], dtype="float", name="arbt_inv_sensory")
# plant toxicity (pt) : monocots (lbs active ingredient/Acre)
self.pt_mono_pre_noec = pd.Series([], dtype="float", name="pt_mono_pre_noec")
self.pt_mono_pre_loec = pd.Series([], dtype="float", name="pt_mono_pre_loec")
self.pt_mono_pre_ec25 = pd.Series([], dtype="float", name="pt_mono_pre_ec25")
self.pt_mono_post_noec = pd.Series([], dtype="float", name="pt_mono_post_noec")
self.pt_mono_post_loec = pd.Series([], dtype="float", name="pt_mono_post_loec")
self.pt_mono_post_ec25 = pd.Series([], dtype="float", name="pt_mono_post_ec25")
self.pt_mono_dir_mort = pd.Series([], dtype="float", name="pt_mono_dir_mort")
self.pt_mono_indir_mort = pd.Series([], dtype="float", name="pt_mono_indir_mort")
self.pt_mono_dir_repro = pd.Series([], dtype="float", name="pt_mono_dir_repro")
self.pt_mono_indir_repro = pd.Series([], dtype="float", name="pt_mono_indir_repro")
# plant toxicity (pt) : dicots (lbs active ingredient/Acre)
self.pt_dicot_pre_noec = pd.Series([], dtype="float", name="pt_dicot_pre_noec")
self.pt_dicot_pre_loec = pd.Series([], dtype="float", name="pt_dicot_pre_loec")
self.pt_dicot_pre_ec25 = pd.Series([], dtype="float", name="pt_dicot_pre_ec25")
self.pt_dicot_post_noec = pd.Series([], dtype="float", name="pt_dicot_post_noec")
        self.pt_dicot_post_loec = pd.Series([], dtype="float", name="pt_dicot_post_loec")
"""(West) German interest and inflation rate 1972-1998"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath, pardir, join
__docformat__ = 'restructuredtext'
COPYRIGHT = """...""" # TODO
TITLE = __doc__
SOURCE = """
http://www.jmulti.de/download/datasets/e6.dat
"""
DESCRSHORT = """(West) German interest and inflation rate 1972Q2 - 1998Q4"""
DESCRLONG = """West German (until 1990) / German (afterwards) interest and
inflation rate 1972Q2 - 1998Q4
"""
NOTE = """::
Number of Observations - 107
Number of Variables - 2
Variable name definitions::
year - 1972q2 - 1998q4
quarter - 1-4
Dp - Delta log gdp deflator
R - nominal long term interest rate
"""
variable_names = ["Dp", "R"]
first_season = 1 # 1 stands for: first observation in Q2 (0 would mean Q1)
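# Illustrative sketch (not part of the original module): the quarterly index
# described in NOTE can be rebuilt with pandas; starting in 1972Q2 (because
# first_season = 1) and ending in 1998Q4 gives exactly 107 periods.
#
#     import pandas as pd
#     quarters = pd.period_range("1972Q2", "1998Q4", freq="Q")
#     assert len(quarters) == 107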
def load():
"""
Load the West German interest/inflation data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The interest_inflation Dataset instance does not contain endog and exog
attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
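# Hedged usage sketch (assumes the module is importable as
# statsmodels.datasets.interest_inflation, as in the statsmodels tree):
#
#     from statsmodels.datasets import interest_inflation
#     df = interest_inflation.load_pandas().data
#     df[["Dp", "R"]].head()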
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 11:40:15 2019
@author: JUANSE
"""
# import the required libraries
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# set the working directory
os.chdir("C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/")
ruta = "C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/"
text_files = [f for f in os.listdir(ruta) if f.endswith('_qc_prec.txt')]
#text_files = [f for f in os.listdir(ruta) if f.endswith('_qc_tmax.txt')]
#text_files = [f for f in os.listdir(ruta) if f.endswith('_raw_tmin.txt')]
'''
estacion = pd.read_csv(text_files[0], sep=" ", parse_dates=["Date"],
index_col="Date")
############
# plotting #
############
estacion.plot()
estacion[estacion>0].plot()
estacion.hist( bins=30, edgecolor='#4aaaaa', color='#80BCD8')
estacion[estacion>0].hist( bins=30, edgecolor='#4aaaaa', color='#80BCD8')
##################
# trend analysis #
##################
# box plot
estacion[estacion['Value']>0].boxplot('Value')
# density plot
estacion[estacion['Value']>0].plot.density()
# scatter plot
plt.scatter(estacion[estacion['Value']>0].index,estacion[estacion['Value']>0]['Value'])
# violin plot
plt.violinplot(estacion[estacion['Value']>0].index,estacion[estacion['Value']>0]['Value'])
###################################
# frequencies and return periods #
###################################
# compute the mean and the standard deviation
promedio = estacion[estacion>0].mean()
desviacion = estacion[estacion>0].std()
print(promedio, desviacion)
# fit the candidate statistical distributions
tabulaciones = np.arange(-40,51,0.1)
distnormal = stats.norm.pdf(tabulaciones,
loc=promedio, scale=desviacion)
distlognormal = stats.pearson3.pdf(tabulaciones,skew=1,
loc=promedio, scale=desviacion)
distweibull = stats.dweibull.pdf(tabulaciones,c=1,
loc=promedio, scale=desviacion)
distchi2 = stats.chi2.pdf(tabulaciones,df=2,
loc=promedio, scale=desviacion)
# plot the data
estacion[estacion>0].hist(bins=100, normed=True, edgecolor='#4aaaaa', color='#80BCD8')
plt.plot(tabulaciones,distnormal, color='#4B4C4E', linewidth=5, linestyle='--',label='Dist Normal')
plt.plot(tabulaciones,distlognormal, color='#3F83B7', linewidth=5, linestyle='--', label='Dist Lognormal')
plt.plot(tabulaciones,distweibull, color='#7B7C7E', linewidth=5, linestyle='-.', label='Dist Weibull')
plt.plot(tabulaciones,distchi2, color='#3F83B7', linewidth=5, linestyle=':', label='Dis Chi2')
plt.xlim(0,200)
plt.legend(loc='upper right')
#plt.figsize(21,14)
'''
##################
# batch analysis #
##################
# concatenate all the stations
'''
df = pd.concat([pd.read_csv(f, sep=" ",
parse_dates=["Date"]) for f in text_files],
ignore_index = True)
'''
dfs = (pd.read_csv(fname,sep=" ",parse_dates=["Date"]) for fname in text_files)
master_df = pd.concat(
(df[[c for c in df.columns if c.lower().startswith('folder')]]
for df in dfs), axis=1)
####################
#text_files = [f for f in os.listdir(ruta) if f.endswith('_raw_prec.txt')]
# determine which stations to read
posicion = pd.read_csv("C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/estaciones_guajira_2019_2.csv")
meej = str(posicion.id).split("_")
m= []
for i in range(len(posicion.id)):
jk = str(posicion.id[i])#.split("_")
m.append(jk)
m= np.asarray(m)
m = [int(i) for i in m]
n=[]
for i in range(len(text_files)):
l= text_files[i].split("_")[0]
n.append(l)
n=np.asarray(n)
n = [int(i) for i in n]
# concatenate
este=1
h = []
for i in (range(len(text_files))):
if n[i] in m:
estacion = pd.read_csv(text_files[i], sep=" ", parse_dates=["Date"],
index_col="Date")
columna = str(n[i])
estacion = estacion.rename(columns={'Value' : columna })
k=i
h.append(k)
if i<=57:
est=estacion
else:
est = pd.concat([est,estacion],axis=1)
else:
este=este+1
print("este no")
est.to_csv('C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/est_prec_qc.csv',sep=';', na_rep='NA')
#est.to_csv('C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/est_tmax.csv',sep=';', na_rep='NA')
#est.to_csv('C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/est_tmin.csv',sep=';', na_rep='NA')
est.to_excel('C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/est.xlsx', na_rep='NA')
est.loc['1968-02-01':'2015-02-28'].plot(subplots=True, figsize=(100, 50)); plt.legend(loc='best')
est.iloc[:,30].plot()
est[est>=0].plot(legend=False)
###############################################################################
# Keep only the stations that have at least 80% valid data:
# drop the columns with more than 20% missing values
est2 = est.loc[:, est.isnull().mean() < .2]
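# Illustration with a hypothetical toy frame (not the station data): a column
# that is 75% NaN is dropped by the 20%-missing threshold above.
#
#     toy = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, np.nan, np.nan, np.nan]})
#     toy.loc[:, toy.isnull().mean() < .2]   # keeps only column "a"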
print (est.head())
est['year'] = est.index.year  # the Date index is already a DatetimeIndex
estt = pd.read_csv('C:/Users/Usuario/Documents/Sequia/acomodar_estaciones/todas/est_prec_qc.csv',sep=';',parse_dates=["Date"])
import pandas as pd
if __name__=='__main__':
for i in range(90001,90011):
prophet_file_path='../data/prophet/prophet_feature_'+str(i)+'.csv'
prophet_data=pd.read_csv(prophet_file_path,index_col=0)
prophet_data.index=pd.to_datetime(prophet_data.index)
train_file_path = '../data/train/merge/nafilled_' + str(i) + '.csv'
train_data = pd.read_csv(train_file_path, index_col=0)
train_data.index = pd.to_datetime(train_data.index)
train_merged_data = pd.merge(train_data, prophet_data, left_index=True, right_index=True)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
import pandas as pd
import numpy as np
import datetime as dt
import concurrent.futures
import threading
from unidecode import unidecode
def get_parties_procesos():
_parties_procesos = pd.read_sql(sql=""" select "t1"."CodigoProceso" as "tender/id",
"t1"."UnidadCompra" as "parties/0/name",
"t1"."CodigoUnidadCompra" as "parties/0/id",
"t2"."FullLocation" as "parties/0/address/streetAddress"
from TRUNCATED as "t1"
inner join ContractNotice as "t2"
on "t1"."CodigoProceso" = "t2"."RequestReference" """,
con=engine_pt.connect())
mapeo_parties1 = _parties_procesos
##Process Identifiers
mapeo_parties1['ocid'] = 'ocds-6550wx-' + mapeo_parties1['tender/id']
mapeo_parties1['id'] = mapeo_parties1['tender/id']
##Identifier Scheme
mapeo_parties1['parties/0/identifier/scheme'] = 'DO-UC'
mapeo_parties1['parties/0/identifier/id'] = mapeo_parties1['parties/0/id']
mapeo_parties1['parties/0/identifier/legalName'] = mapeo_parties1['parties/0/name']
mapeo_parties1['parties/0/id'] = 'DO-UC-'+ mapeo_parties1['parties/0/identifier/id']
#Party Role
mapeo_parties1['parties/0/roles'] = 'procuringEntity'
mapeo_parties1= mapeo_parties1[
['parties/0/name',
'parties/0/id',
'parties/0/identifier/scheme',
'parties/0/identifier/id',
'parties/0/roles']]
return mapeo_parties1
def get_parties_contratos():
options = {
'TipoPersonaRPENoEspecificada': {
'Persona Natural': 'N/A',
'Persona Jurídica': 'No clasificada',
'Oficina Gubernamental': 'No clasificada',
'Cooperativa': 'No clasificada',
'Asociación Sin Fines de Lucro': 'No clasificada',
},
'ClasificacionRPE': {
'Migración': 'No clasificada'
},
'PartiesDetalles': {
'N/A': 'person',
'Gran empresa': 'large',
'Mediana empresa': 'medium',
'Pequeña empresa': 'small',
'Micro empresa': 'micro',
'No clasificada': ''
},
'genero': {
'Femenino': 'female',
'Masculino': 'male',
'No Especificado': ''}
}
_parties_contratos = pd.read_sql(sql="""select "t1"."CodigoProceso" as "tender/id",
"t1"."RPE" as "parties/0/id"
from TRUNCATED as "t1" """,
con=engine_pt.connect())
parties_contratos= _parties_contratos
##Process Identifiers
parties_contratos['ocid'] = 'ocds-6550wx-' + parties_contratos['tender/id']
parties_contratos['id'] = parties_contratos['tender/id']
_proveedores = pd.read_sql(sql=""" select "t1"."RazonSocial" as "parties/0/name",
"t1"."RPE" as "parties/0/id",
"t1"."Direccion" as "parties/0/address/streetAddress",
"t1"."Municipio" as "parties/0/address/locality",
"t1"."Provincia" as "parties/0/address/region",
"t1"."Genero",
"t1"."ClasificacionRPE",
"t1"."TipoPersona"
from TRUNCATED as "t1" """, con=engine_sigef.connect())
proveedores= _proveedores
proveedores['clasificacion_empresarial'] = proveedores.apply(lambda x: options['TipoPersonaRPENoEspecificada'].get(x.TipoPersona, x.ClasificacionRPE) if x.ClasificacionRPE=='No Especificada' else (options['ClasificacionRPE'].get(x.ClasificacionRPE, x.ClasificacionRPE)), axis=1)
#Extensions
proveedores['parties/0/details/scale'] = proveedores['clasificacion_empresarial'].apply(lambda x: options['PartiesDetalles'].get(x,x))
proveedores['parties/0/details/gender'] = proveedores['Genero'].apply(lambda x: options['genero'].get(x,x))
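# Hedged illustration (example values only, not registry data): the two lookups
# above map the supplier attributes onto OCDS codelists, e.g.
#
#     options['PartiesDetalles'].get('Micro empresa')   # -> 'micro'
#     options['genero'].get('Femenino')                 # -> 'female'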
##Merging the party information
parties_contratos['parties/0/id'] = parties_contratos['parties/0/id'].astype('float')
proveedores['parties/0/id'] = proveedores['parties/0/id'].astype('float')
mapeo_parties2 = pd.merge(parties_contratos, proveedores, left_on='parties/0/id', right_on='parties/0/id', how='left')
mapeo_parties2['parties/0/id'] = mapeo_parties2['parties/0/id'].astype('str')
##Identifier Scheme
mapeo_parties2['parties/0/identifier/scheme'] = 'DO-RPE'
mapeo_parties2['parties/0/identifier/id'] = mapeo_parties2['parties/0/id']
mapeo_parties2['parties/0/identifier/legalName'] = mapeo_parties2['parties/0/name']
mapeo_parties2['parties/0/id'] = 'DO-UC-' + mapeo_parties2['parties/0/identifier/id']
#Party Role
mapeo_parties2['parties/0/roles'] = 'supplier'
mapeo_parties3= mapeo_parties2[
['ocid',
'id',
'parties/0/name',
'parties/0/id',
'parties/0/identifier/scheme',
'parties/0/identifier/id',
'parties/0/roles']]
mapeo_parties4= mapeo_parties2[
['parties/0/id',
'parties/0/identifier/legalName',
'parties/0/address/streetAddress',
'parties/0/address/locality',
'parties/0/address/region',
'parties/0/details/gender',
'parties/0/details/scale']]
mapeo_parties1= get_parties_procesos()
frames = [mapeo_parties1, mapeo_parties3]
completo= pd.concat(frames)
completo2 = pd.merge(completo, mapeo_parties4, left_on='parties/0/id', right_on='parties/0/id', how='left')
# This file is part of Patsy
# Copyright (C) 2012-2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Exhaustive end-to-end tests of the top-level API.
import sys
import __future__
import six
import numpy as np
from nose.tools import assert_raises
from patsy import PatsyError
from patsy.design_info import DesignMatrix, DesignInfo
from patsy.eval import EvalEnvironment
from patsy.desc import ModelDesc, Term, INTERCEPT
from patsy.categorical import C
from patsy.contrasts import Helmert
from patsy.user_util import balanced, LookupFactor
from patsy.build import (design_matrix_builders,
build_design_matrices)
from patsy.highlevel import *
from patsy.util import (have_pandas,
have_pandas_categorical,
have_pandas_categorical_dtype,
pandas_Categorical_from_codes)
from patsy.origin import Origin
if have_pandas:
import pandas
def check_result(expect_full_designs, lhs, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names): # pragma: no cover
assert np.allclose(rhs, expected_rhs_values)
assert rhs.design_info.column_names == expected_rhs_names
if lhs is not None:
assert np.allclose(lhs, expected_lhs_values)
assert lhs.design_info.column_names == expected_lhs_names
else:
assert expected_lhs_values is None
assert expected_lhs_names is None
if expect_full_designs:
if lhs is None:
new_rhs, = build_design_matrices([rhs.design_info], data)
else:
new_lhs, new_rhs = build_design_matrices([lhs.design_info,
rhs.design_info],
data)
assert np.allclose(new_lhs, lhs)
assert new_lhs.design_info.column_names == expected_lhs_names
assert np.allclose(new_rhs, rhs)
assert new_rhs.design_info.column_names == expected_rhs_names
else:
assert rhs.design_info.terms is None
assert lhs is None or lhs.design_info.terms is None
def dmatrix_pandas(formula_like, data={}, depth=0, return_type="matrix"):
return_type = "dataframe"
if isinstance(depth, int):
depth += 1
return dmatrix(formula_like, data, depth, return_type=return_type)
def dmatrices_pandas(formula_like, data={}, depth=0, return_type="matrix"):
return_type = "dataframe"
if isinstance(depth, int):
depth += 1
return dmatrices(formula_like, data, depth, return_type=return_type)
def t(formula_like, data, depth,
expect_full_designs,
expected_rhs_values, expected_rhs_names,
expected_lhs_values=None, expected_lhs_names=None): # pragma: no cover
if isinstance(depth, int):
depth += 1
def data_iter_maker():
return iter([data])
if (isinstance(formula_like, six.string_types + (ModelDesc, DesignInfo))
or (isinstance(formula_like, tuple)
and isinstance(formula_like[0], DesignInfo))
or hasattr(formula_like, "__patsy_get_model_desc__")):
if expected_lhs_values is None:
builder = incr_dbuilder(formula_like, data_iter_maker, depth)
lhs = None
(rhs,) = build_design_matrices([builder], data)
else:
builders = incr_dbuilders(formula_like, data_iter_maker, depth)
lhs, rhs = build_design_matrices(builders, data)
check_result(expect_full_designs, lhs, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names)
else:
assert_raises(PatsyError, incr_dbuilders,
formula_like, data_iter_maker)
assert_raises(PatsyError, incr_dbuilder,
formula_like, data_iter_maker)
one_mat_fs = [dmatrix]
two_mat_fs = [dmatrices]
if have_pandas:
one_mat_fs.append(dmatrix_pandas)
two_mat_fs.append(dmatrices_pandas)
if expected_lhs_values is None:
for f in one_mat_fs:
rhs = f(formula_like, data, depth)
check_result(expect_full_designs, None, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names)
# We inline assert_raises here to avoid complications with the
# depth argument.
for f in two_mat_fs:
try:
f(formula_like, data, depth)
except PatsyError:
pass
else:
raise AssertionError
else:
for f in one_mat_fs:
try:
f(formula_like, data, depth)
except PatsyError:
pass
else:
raise AssertionError
for f in two_mat_fs:
(lhs, rhs) = f(formula_like, data, depth)
check_result(expect_full_designs, lhs, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names)
def t_invalid(formula_like, data, depth, exc=PatsyError): # pragma: no cover
if isinstance(depth, int):
depth += 1
fs = [dmatrix, dmatrices]
if have_pandas:
fs += [dmatrix_pandas, dmatrices_pandas]
for f in fs:
try:
f(formula_like, data, depth)
except exc:
pass
else:
raise AssertionError
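# Illustrative sketch (not one of the parametrised cases below): the simplest
# calling convention exercised by these helpers is a plain array-like,
#
#     dmatrix([[1, 2, 3], [4, 5, 6]])
#
# which yields a DesignMatrix with default column names "x0", "x1", "x2".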
# Exercise all the different calling conventions for the high-level API
def test_formula_likes():
# Plain array-like, rhs only
t([[1, 2, 3], [4, 5, 6]], {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
t((None, [[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
t(np.asarray([[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
t((None, np.asarray([[1, 2, 3], [4, 5, 6]])), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
dm = DesignMatrix([[1, 2, 3], [4, 5, 6]], default_column_prefix="foo")
t(dm, {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["foo0", "foo1", "foo2"])
t((None, dm), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["foo0", "foo1", "foo2"])
# Plain array-likes, lhs and rhs
t(([1, 2], [[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
t(([[1], [2]], [[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
t((np.asarray([1, 2]), np.asarray([[1, 2, 3], [4, 5, 6]])), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
t((np.asarray([[1], [2]]), np.asarray([[1, 2, 3], [4, 5, 6]])), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
x_dm = DesignMatrix([[1, 2, 3], [4, 5, 6]], default_column_prefix="foo")
y_dm = DesignMatrix([1, 2], default_column_prefix="bar")
t((y_dm, x_dm), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["foo0", "foo1", "foo2"],
[[1], [2]], ["bar0"])
# number of rows must match
t_invalid(([1, 2, 3], [[1, 2, 3], [4, 5, 6]]), {}, 0)
# tuples must have the right size
t_invalid(([[1, 2, 3]],), {}, 0)
t_invalid(([[1, 2, 3]], [[1, 2, 3]], [[1, 2, 3]]), {}, 0)
# plain Series and DataFrames
if have_pandas:
# Names are extracted
t(pandas.DataFrame({"x": [1, 2, 3]}),
# standard library
from typing import List, Union, Tuple
# dependent packages
import numpy as np
import pandas as pd
from lmfit.models import LorentzianModel
from scipy.interpolate import interp1d
from scipy.stats import cauchy
# type aliases
ArrayLike = Union[np.ndarray, List[float], List[int], float, int]
# main functions
def eta_filter_lorentzian(
F: ArrayLike,
FWHM: ArrayLike,
eta_circuit: ArrayLike = 1,
F_res: int = 30,
overflow: int = 80,
) -> Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
ArrayLike,
ArrayLike,
ArrayLike,
]:
"""Calculate the filter transmissions as a matrix of
Lorentzian approximations. Also calculates approximating box filter
Parameters
----------
F
Center frequency of the filter channels.
Units: Hz (works also for GHz, will detect)
FWHM
Full width at half maximum of the filter channels.
Units: same as F.
eta_circuit
Average transmission over the FWHM. Equal to pi/4 times the peak transmission
Units: none.
F_res
The number of frequency bins within a FWHM
Units: none.
overflow
The number of extra FWHM's below the first and above the last channel
Units: none.
Returns
-------
eta_filter
The filter transmission as an m x n matrix
m: the number of integration bins.
n: the number of filter channels.
Units: None.
eta_inband
Whether a frequency is out- (false) or inband (true) as an m x n matrix
m: the number of integration bins.
n: the number of filter channels.
Units: None.
F_int
Frequency integration bins.
Units: Hz.
W_F_int
Integration bandwidth bins.
Units: Hz.
box_height
The transmission of the box-filter approximation.
Units: None.
box_width
The bandwidth of the box-filter approximation.
Units: Hz.
chi_sq
Zero. For compatibility with the .csv fit
Units: None.
"""
if np.average(F) < 10.0**9:
F = F * 10.0**9
FWHM = FWHM * 10.0**9
# give F a length if it is an integer.
if not hasattr(F, "__len__"):
F = np.asarray([F])
# give FWHM a length if it is an integer.
if not hasattr(FWHM, "__len__"):
FWHM = np.asarray([FWHM])
F_int, W_F_int = expand_F(F, FWHM, F_res, overflow)
eta_filter = (
eta_circuit
* 4
/ np.pi
* (cauchy.pdf(F_int[np.newaxis].T, F, FWHM / 2) * FWHM / 2 * np.pi).T
)
# Equivalent in-band box filter approximation
box_height = eta_circuit
box_width = FWHM
chi_sq = np.zeros(len(F))
eta_inband = eta_inband_mask(F_int, F, box_width / 2) * eta_filter
return eta_filter, eta_inband, F, F_int, W_F_int, box_height, box_width, chi_sq
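# Hedged usage sketch (example numbers only): three channels near 350 GHz with
# a resolving power of roughly 500, using the default eta_circuit, F_res and
# overflow.
#
#     F = np.array([345e9, 350e9, 355e9])
#     FWHM = F / 500
#     (eta_filter, eta_inband, F, F_int, W_F_int,
#      box_height, box_width, chi_sq) = eta_filter_lorentzian(F, FWHM)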
def eta_filter_csv(
file: str,
) -> Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
ArrayLike,
ArrayLike,
ArrayLike,
]:
"""Read filter transmissionsfrom csv and return filter matrix,
integrationbins and integration bin bandwith
Parameters
----------
file
A string to the location of the .csv file
.csv has headers of frequency bins (in GHz) and rows of channels
Returns
-------
eta_filter
The filter transmission as an m x n matrix
m: the number of integration bins.
n: the number of filter channels.
Units: None.
eta_inband
Whether a frequency is out- (false) or inband (true) as an m x n matrix
m: the number of integration bins.
n: the number of filter channels.
Units: None.
F
Center frequency of each channel
F_int
The integration bins. Units: Hz.
W_F_int
The integration bandwidth. Units: Hz.
box_height
The transmission of the box-filter approximation.
Units: None.
box_width
The bandwidth of the box-filter approximation.
Units: Hz
chi_sq
The Chi Square value of the Lorentzian fit.
Units: None.
"""
eta_filter_df = pd.read_csv(file, header=0)
F_int = eta_filter_df.columns.values.astype(float)
if np.average(F_int) < 10.0**9:
F_int = F_int * 10.0**9
eta_filter_df.columns = F_int
# Fit to lorentzian model
fit = np.apply_along_axis(fit_lorentzian, 1, eta_filter_df.to_numpy(), x=F_int)
fit_df = pd.DataFrame(fit, columns=["Center", "HWHM", "max height", "chi sq"])
import numpy as np
import pandas as pd
import pandas._testing as tm
class TestTranspose:
def test_transpose_tzaware_1col_single_tz(self):
# GH#26825
dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)  # show all columns when printing
target_feature = 'AGE_GROUP'
# fill or drop na value
def checkNA(data):
# drop unused data
data = data.drop(
['HCHK_YEAR', 'SIDO', 'TOT_CHOLE', 'TRIGLYCERIDE', 'HDL_CHOLE', 'LDL_CHOLE', 'HCHK_OE_INSPEC_YN', 'CRS_YN',
'TTR_YN', 'DATA_STD__DT'], axis=1)
data = data.dropna(subset=['AGE_GROUP', 'HEIGHT', 'WEIGHT', 'WAIST'], axis=0)
data = data.fillna(method='ffill')
data = data.fillna(method='bfill')
return data
# read data from csv file
def read_data():
data = pd.read_csv("health_checkup.csv")
data = checkNA(data)
return data
# for one-hot-encoding
def dummy_data(data, columns):
for column in columns:
data = pd.concat([data, pd.get_dummies(data[column], prefix=column)], axis=1)
data = data.drop(column, axis=1)
return data
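# Hedged illustration (toy frame, not the checkup schema): dummy_data() one-hot
# encodes each listed column with pd.get_dummies and drops the original column.
#
#     toy = pd.DataFrame({'sex': ['M', 'F'], 'age': [30, 40]})
#     dummy_data(toy, ['sex'])   # columns: age, sex_F, sex_M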
def Preprocessing(feature, encode_list, scale_list):
# feature : dataframe of feature
# scaler
scaler_stndard = preprocessing.StandardScaler()
scaler_MM = preprocessing.MinMaxScaler()
scaler_robust = preprocessing.RobustScaler()
scaler_maxabs = preprocessing.MaxAbsScaler()
scaler_normalize = preprocessing.Normalizer()
scalers = [None, scaler_stndard, scaler_MM, scaler_robust, scaler_maxabs, scaler_normalize]
scalers_name = ["original", "standard", "minmax", "robust", "maxabs", "normalize"]
# encoder
encoder_ordinal = preprocessing.OrdinalEncoder()
# one hot encoding => using pd.get_dummies() (not used preprocessing.OneHotEncoder())
encoders_name = ["ordinal", "onehot"]
# result box
result_dictionary = {}
i = 0
if encode_list == []:
for scaler in scalers:
if i == 0: # not scaling
result_dictionary[scalers_name[i]] = feature.copy()
else:
# ===== scalers
result_dictionary[scalers_name[i]] = feature.copy()
result_dictionary[scalers_name[i]][scale_list] = scaler.fit_transform(feature[scale_list]) # scaling
i = i + 1
return result_dictionary
for scaler in scalers:
if i == 0: # not scaling
result_dictionary[scalers_name[i] + "_ordinal"] = feature.copy()
result_dictionary[scalers_name[i] + "_ordinal"][encode_list] = encoder_ordinal.fit_transform(
feature[encode_list])
result_dictionary[scalers_name[i] + "_onehot"] = feature.copy()
result_dictionary[scalers_name[i] + "_onehot"] = dummy_data(result_dictionary[scalers_name[i] + "_onehot"],
encode_list)
else:
# ===== scalers + ordinal encoding
result_dictionary[scalers_name[i] + "_ordinal"] = feature.copy()
result_dictionary[scalers_name[i] + "_ordinal"][scale_list] = scaler.fit_transform(
feature[scale_list]) # scaling
result_dictionary[scalers_name[i] + "_ordinal"][encode_list] = encoder_ordinal.fit_transform(
feature[encode_list]) # encoding
# ===== scalers + OneHot encoding
result_dictionary[scalers_name[i] + "_onehot"] = feature.copy()
result_dictionary[scalers_name[i] + "_onehot"][scale_list] = scaler.fit_transform(
feature[scale_list]) # scaling
result_dictionary[scalers_name[i] + "_onehot"] = dummy_data(result_dictionary[scalers_name[i] + "_onehot"],
encode_list) # encoding
i = i + 1
return result_dictionary
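# Hedged usage sketch (toy column names, not necessarily the checkup schema):
# the returned dict is keyed "<scaler>_<encoder>", e.g. "minmax_onehot".
#
#     combos = Preprocessing(df, encode_list=['SEX'], scale_list=['HEIGHT', 'WEIGHT'])
#     list(combos)                    # 12 keys: 'original_ordinal', 'original_onehot', ...
#     combos['standard_ordinal'].head()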
def plotCurrentResult(score, title):
plt.title(title)
x_values = range(1, len(score) + 1)
plt.xlabel('Parameter set')
if "Decision Tree" in score['current_model'][0]:
tempList = [_['max_depth'] for _ in score['params']]
plt.xticks(x_values, tempList)
elif "Support Vector Machine" in score['current_model'][0]:
tempList = [[_['C'], _['gamma']] for _ in score['params']]
plt.xticks(x_values, tempList)
else:
tempList = [_['n_neighbors'] for _ in score['params']]
plt.xticks(x_values, tempList)
plt.ylabel('mean score')
y_values = score['mean_test_score'].tolist()
plt.plot(x_values, y_values)
plt.show()
def plotCurrentResult2(score, title):
plt.title(title)
x_values = range(1, len(score) + 1)
xList = []
yList = []
plt.xlabel('Model states')
plt.ylabel('best score')
for _ in score:
xList.append([_['best-model'], _['best-param']])
yList.append(_['best-score'])
plt.xticks(x_values, xList)
plt.plot(x_values, yList)
plt.show()
def classification(data, target):
# temp best record variable
best_answer = {
'best-model': "",
'best-param': "",
'best-score': -1.0,
}
whole_score = pd.DataFrame()
# split train / test dataset
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
# case 1: decision tree (entropy)
classifier = DecisionTreeClassifier(criterion="entropy")
params = {
'max_depth': [2, 3, 4, 5, 6, 7, 8]
}
grid_tree = GridSearchCV(classifier, param_grid=params, cv=3, refit=True)
grid_tree.fit(X_train, y_train)
bestModel = grid_tree.best_estimator_
bestParam = grid_tree.best_params_
bestScore = bestModel.score(X_test, y_test)
if bestScore > best_answer['best-score']:
best_answer = {
'best-model': "Decision Tree (entropy)",
'best-param': bestParam,
'best-score': bestScore
}
scores = pd.DataFrame(grid_tree.cv_results_)[
['params', 'mean_test_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score']]
scores['current_model'] = "Decision Tree (entropy)"
whole_score = pd.concat([whole_score, scores], ignore_index=True)
import numpy as np
import matplotlib.pyplot as plt
import psycopg2 as sql
import pandas as pd
db = sql.connect(
database='IMDb',
user='username',
password = 'password'
)
c = db.cursor()
def media_counts(q_tvEpisode, q_short, q_movie, q_video, q_tvMovie, q_tvSeries):
c.execute(q_tvEpisode)
rows = c.fetchall()
tv_Episode_data = pd.DataFrame(rows, columns=['year_produced', 'Amount'])
c.execute(q_short)
rows = c.fetchall()
short_data = pd.DataFrame(rows, columns=['year_produced', 'Amount'])
c.execute(q_movie)
rows = c.fetchall()
movie_data = pd.DataFrame(rows, columns=['year_produced', 'Amount'])
c.execute(q_video)
rows = c.fetchall()
video_data = pd.DataFrame(rows, columns=['year_produced', 'Amount'])
c.execute(q_tvMovie)
rows = c.fetchall()
tvMovie_data = pd.DataFrame(rows, columns=['year_produced', 'Amount'])
# std
import copy
import time
from contextlib import closing
# 3rd
import numpy as np
import pandas as pd
import pathlib
from typing import Union, List, Dict, Optional, Iterable, Sequence, Any
from sqlite3 import Connection
# ours
import ankipandas.raw as raw
from ankipandas.util.dataframe import replace_df_inplace, merge_dfs
import ankipandas._columns as _columns
from ankipandas.util.misc import invert_dict, flatten_list_list
from ankipandas.util.log import log
from ankipandas.util.checksum import field_checksum
from ankipandas.util.guid import guid as generate_guid
from ankipandas.util.types import (
is_list_list_like,
is_list_like,
is_list_dict_like,
is_dict_list_like,
)
class AnkiDataFrame(pd.DataFrame):
#: Additional attributes of a :class:`AnkiDataFrame` that a normal
#: :class:`pandas.DataFrame` does not possess. These will be copied in the
#: constructor.
#: See https://pandas.pydata.org/pandas-docs/stable/development/extending.html
_metadata = [
"col",
"_anki_table",
"fields_as_columns_prefix",
"_fields_format",
"_df_format",
]
def __init__(self, *args, **kwargs):
"""Initializes a blank :class:`AnkiDataFrame`.
.. warning::
It is recommended to directly initialize this class with the notes,
cards or revs table, using one of the methods
:meth:`.notes`, :meth:`.cards` or :meth:`.revs` instead!
Args:
*args: Internal use only. See arguments of
:class:`pandas.DataFrame`.
**kwargs: Internal use only. See arguments of
:class:`pandas.DataFrame`.
"""
super().__init__(*args, **kwargs)
# IMPORTANT: Make sure to add all attributes to the class variable
# :attr:`._attributes`. Also all of them have to be initialized as None!
# (see the code where we copy attributes).
# todo: document
self.col = None
# noinspection PyTypeChecker
# gets set by _get_table
#: Type of anki table: 'notes', 'cards' or 'revs'. This corresponds to
#: the meaning of the ID row.
self._anki_table = None # type: str
#: Prefix for fields as columns. Default is ``nfld_``.
self.fields_as_columns_prefix = "nfld_"
#: Fields format: ``none``, ``list`` or ``columns`` or ``in_progress``,
#: or ``anki`` (default)
self._fields_format = "anki"
# gets set by _get_table
# noinspection PyTypeChecker
#: Overall structure of the dataframe ``anki``, ``ours``, ``in_progress``
self._df_format = None # type: str
@property
def _constructor(self):
"""This needs to be overridden so that any DataFrame operations do not
return a :class:`pandas.DataFrame` but a :class:`AnkiDataFrame`."""
return AnkiDataFrame
# Constructors
# ==========================================================================
def _get_table(self, col, table, empty=False):
self._anki_table = table
self._df_format = "anki"
self.col = col
if empty:
df = raw.get_empty_table(table)
else:
with closing(col.db) as db:
df = raw.get_table(db, table)
replace_df_inplace(self, df)
self.normalize(inplace=True)
@classmethod
def init_with_table(cls, col, table, empty=False):
new = AnkiDataFrame()
new._get_table(col, table, empty=empty)
return new
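# Illustrative note (hedged; names taken from the public ankipandas API rather
# than this file): these constructors are normally reached through a Collection
# object instead of being called directly.
#
#     col = ankipandas.Collection()   # locates the Anki database
#     notes = col.notes               # AnkiDataFrame with _anki_table == "notes"
#     cards = col.cards               # AnkiDataFrame with _anki_table == "cards"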
# Fixes
# ==========================================================================
def equals(self, other):
return pd.DataFrame(self).equals(other)
def append(self, *args, **kwargs):
ret = pd.DataFrame.append(self, *args, **kwargs)
ret.astype(_columns.dtype_casts2[self._anki_table])
return ret
def update(self, other, force=False, **kwargs):
if not force and isinstance(other, AnkiDataFrame):
if other._df_format != self._df_format:
raise ValueError(
"You're trying to update an AnkiDataFrame in format {f1}"
" using another AnkiDataFrame in format {f2}. That doesn't "
"sound like a good idea. However you can still do this "
"using the force=True option.".format(
f1=self._df_format, f2=other._df_format
)
)
if other._anki_table != self._anki_table:
raise ValueError(
"You're trying to update an AnkiDataFrame of table {f1} "
"with an AnkiDataFrame of table {f2}. That doesn't sound"
" like a good idea. However you can still do this using "
"the force=True option.".format(
f1=self._anki_table, f2=other._anki_table
)
)
if self._anki_table == "notes":
if other._fields_format != self._fields_format:
raise ValueError(
"You are trying to update a notes AnkiDataFrame where "
"the fields are in format '{f1}' with a notes "
"AnkiDataFrame where the fields are in format '{f2}'. "
"That doesn't sound like a good idea. However you can "
"still do this using the force=True option. "
"Or you simply ensure that both have the same format"
" using the fields_as_columns() or fields_as_list() "
"method.".format(
f1=self._fields_format, f2=other._fields_format
)
)
super().update(other, **kwargs)
# Fix https://github.com/pandas-dev/pandas/issues/4094
for col, typ in _columns.dtype_casts2[self._anki_table].items():
self[col] = self[col].astype(typ)
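# A hedged illustration of the pandas behaviour the cast above guards against
# (toy frame, not collection data): DataFrame.update() upcasts integer columns
# to float, so the dtypes are restored explicitly.
#
#     df = pd.DataFrame({"a": [1, 2]})
#     df.update(pd.DataFrame({"a": [3]}))
#     df["a"].dtype   # float64 unless cast back to an integer dtype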
# Checks
# ==========================================================================
def check_table_integrity(self):
duplicates = self.index[self.index.duplicated()].tolist()
if duplicates:
log.critical(
"Duplicated indizes in table %s discovered, so something "
"definitely went wrong. Please don't ignore this warning. "
"These indizes appear more than once: %s",
self._anki_table,
", ".join(map(str, duplicates)),
)
def _invalid_table(self):
raise ValueError(f"Invalid table: {self._anki_table}.")
def _check_df_format(self):
if self._df_format == "in_progress":
raise ValueError(
"Previous call to normalize() or raw() did not terminate "
"successfully. This is usually a very bad sign, but you can "
"try calling them again with the force option: raw(force=True) "
"or raw(force=True) and see if that works."
)
elif self._df_format == "anki":
pass
elif self._df_format == "ours":
pass
else:
raise ValueError(f"Unknown value of _df_format: {self._df_format}")
def _check_our_format(self):
self._check_df_format()
if not self._df_format == "ours":
raise ValueError(
"This operation is not supported for AnkiDataFrames in the "
"'raw' format. Perhaps you called raw() before or used the "
"raw=True option when loading? You can try switching to the "
"required format using the normalize() method."
)
# Properties
# ==========================================================================
@property
def db(self) -> Connection:
"""Opened Anki database (:class:`sqlite3.Connection`). Make sure to
call `db.close()` after you're done. Better still, use
`contextlib.closing`.
"""
return self.col.db
# IDs
# ==========================================================================
@property
def id(self):
"""Return note/card/review ID as :class:`pandas.Series` of integers."""
if self._anki_table == "notes":
return self.nid
elif self._anki_table == "cards":
return self.cid
elif self._anki_table == "revs":
return self.rid
else:
self._invalid_table()
@property
def nid(self):
"""Note ID as :class:`pandas.Series` of integers."""
if self._anki_table == "notes":
return self.index
elif self._anki_table == "cards":
if "nid" not in self.columns:
raise ValueError(
"You seem to have removed the 'nid' column. That was not "
"a good idea. Cannot get note ID anymore."
)
else:
return self["nid"]
elif self._anki_table == "revs":
if "nid" in self.columns:
return self["nid"]
else:
return self.cid.map(raw.get_cid2nid(self.db))
else:
self._invalid_table()
@nid.setter
def nid(self, value):
if self._anki_table == "notes":
raise ValueError(
"Note ID column should already be index and notes.nid() will "
"always return this index. Therefore you should not set nid "
"to a column."
)
else:
self["nid"] = value
@property
def cid(self):
"""Card ID as :class:`pandas.Series` of integers."""
if self._anki_table == "cards":
return self.index
if self._anki_table == "revs":
if "cid" not in self.columns:
raise ValueError(
"You seem to have removed the 'cid' column. That was not "
"a good idea. Cannot get card ID anymore."
)
else:
return self["cid"]
elif self._anki_table == "notes":
raise ValueError(
"Notes can belong to multiple cards. Therefore it is impossible"
" to associate a card ID with them."
)
else:
self._invalid_table()
@cid.setter
def cid(self, value):
if self._anki_table == "cards":
raise ValueError(
"Card ID column should already be index and notes.cid() will "
"always return this index. Therefore you should not set cid "
"to a column."
)
elif self._anki_table == "revs":
self["cid"] = value
else:
raise ValueError(
"Notes can belong to multiple cards. Therefore please "
" do not associate a card ID with them."
)
@property
def rid(self):
"""Review ID as :class:`pandas.Series` of integers."""
if self._anki_table == "revs":
return self.index
else:
if "rid" in self.columns:
return self["rid"]
else:
raise ValueError(
"Review index is only available for the 'revs' table by"
" default."
)
# noinspection PyUnusedLocal
@rid.setter
def rid(self, value):
if self._anki_table == "revs":
raise ValueError(
"Review ID column should already be index and notes.rid() will "
"always return this index. Therefore you should not set rid "
"to a column."
)
else:
raise ValueError(
"Setting a review index 'rid' makes no sense in "
"tables other than 'rev'."
)
@property
def mid(self):
"""Model ID as :class:`pandas.Series` of integers."""
if self._anki_table in ["notes"]:
if "nmodel" not in self.columns:
raise ValueError(
"You seem to have removed the 'nmodel' column. That was not"
" a good idea. Cannot get model ID anymore."
)
else:
return self["nmodel"].map(raw.get_model2mid(self.db))
if self._anki_table in ["revs", "cards"]:
if "nmodel" in self.columns:
return self["nmodel"].map(raw.get_model2mid(self.db))
else:
return self.nid.map(raw.get_nid2mid(self.db))
else:
self._invalid_table()
@mid.setter
def mid(self, value):
if self._anki_table == "notes":
log.warning(
"You can set an additional 'mid' column, but this will always"
" be overwritten with the information from the 'nmodel' "
"column."
)
self["mid"] = value
@property
def did(self):
"""Deck ID as :class:`pandas.Series` of integers."""
if self._anki_table == "cards":
if "cdeck" not in self.columns:
raise ValueError(
"You seem to have removed the 'cdeck' column. That was not "
"a good idea. Cannot get deck ID anymore."
)
return self["cdeck"].map(raw.get_deck2did(self.db))
elif self._anki_table == "notes":
raise ValueError(
"Notes can belong to multiple decks. Therefore it is impossible"
" to associate a deck ID with them."
)
elif self._anki_table == "revs":
return self.cid.map(raw.get_cid2did(self.db))
else:
self._invalid_table()
@did.setter
def did(self, value):
if self._anki_table == "cards":
log.warning(
"You can set an additional deck ID 'did' column, but this "
"will always be overwritten with the information from the "
"'cdeck' column."
)
self["did"] = value
@property
def odid(self):
"""Original deck ID for cards in filtered deck as
:class:`pandas.Series` of integers.
"""
if self._anki_table == "cards":
if "odeck" not in self.columns:
raise ValueError(
"You seem to have removed the 'odeck' column. That was not "
"a good idea. Cannot get original deck ID anymore."
)
return self["odeck"].map(raw.get_deck2did(self.db))
elif self._anki_table == "revs":
if "odeck" in self.columns:
return self["odeck"].map(raw.get_deck2did(self.db))
elif self._anki_table == "notes":
raise ValueError(
"The original deck ID (odid) is not available for the notes "
"table."
)
else:
self._invalid_table()
@odid.setter
def odid(self, value):
if self._anki_table == "cards":
log.warning(
"You can set an additional 'odid' column, but this will always"
" be overwritten with the information from the 'odeck' "
"column."
)
self["odid"] = value
# Merge tables
# ==========================================================================
def merge_notes(
self,
inplace=False,
columns=None,
drop_columns=None,
prepend="n",
prepend_clash_only=True,
):
"""Merge note table into existing dataframe.
Args:
inplace: If False, return new dataframe, else update old one
columns: Columns to merge
drop_columns: Columns to ignore when merging
prepend: Prepend this string to fields from note table
prepend_clash_only: Only prepend the ``prepend`` string when column
names would otherwise clash.
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
self._check_our_format()
if self._anki_table == "notes":
raise ValueError(
"AnkiDataFrame was already initialized as a table of type"
" notes, therefore merge_notes() doesn't make any sense."
)
elif self._anki_table == "revs":
self["nid"] = self.nid
ret = merge_dfs(
df=self,
df_add=self.col.notes,
id_df="nid",
id_add="nid",
inplace=inplace,
prepend=prepend,
prepend_clash_only=prepend_clash_only,
columns=columns,
drop_columns=drop_columns,
)
return ret
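# A minimal usage sketch, assuming `col` is an already loaded ankipandas
# collection object that exposes .cards and .notes as AnkiDataFrames, as
# used elsewhere in this class:
cards_with_notes = col.cards.merge_notes()               # merge all note columns
tagged_cards = col.cards.merge_notes(columns=["ntags"])  # merge only the tag column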
def merge_cards(
self,
inplace=False,
columns=None,
drop_columns=None,
prepend="c",
prepend_clash_only=True,
):
"""
Merges information from the card table into the current dataframe.
Args:
inplace: If False, return new dataframe, else update old one
columns: Columns to merge
drop_columns: Columns to ignore when merging
prepend: Prepend this string to fields from card table
prepend_clash_only: Only prepend the ``prepend`` string when column
names would otherwise clash.
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
if self._anki_table == "cards":
raise ValueError(
"AnkiDataFrame was already initialized as a table of type"
" cards, therefore merge_cards() doesn't make any sense."
)
elif self._anki_table == "notes":
raise ValueError(
"One note can correspond to more than one card, therefore it "
"it is not supported to merge the cards table into the "
"notes table."
)
self._check_our_format()
ret = merge_dfs(
df=self,
df_add=self.col.cards,
id_df="cid",
inplace=inplace,
columns=columns,
drop_columns=drop_columns,
id_add="cid",
prepend=prepend,
prepend_clash_only=prepend_clash_only,
)
return ret
# Toggle format
# ==========================================================================
def fields_as_columns(self, inplace=False, force=False):
"""
In the 'notes' table, the field contents of the notes are contained in
one column ('nflds') by default. With this method, this column can be
split up into a new column for every field.
Args:
inplace: If False, return new dataframe, else update old one
force: Internal use
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
if not force:
self._check_our_format()
if not inplace:
df = self.copy(True)
df.fields_as_columns(inplace=True, force=force)
return df
if self._fields_format == "columns":
log.warning(
"Fields are already as columns."
" Returning without doing anything."
)
return
elif self._fields_format == "in_progress" and not force:
raise ValueError(
"It looks like the last call to fields_as_list or"
"fields_as_columns was not successful, so you better start "
"over."
)
elif self._fields_format == "list":
pass
else:
raise ValueError(f"Unknown _fields_format: {self._fields_format}")
if "nflds" not in self.columns:
raise ValueError("Could not find fields column 'nflds'.")
self._fields_format = "in_progress"
# fixme: What if one field column is one that is already in use?
prefix = self.fields_as_columns_prefix
mids = self.mid.unique()
for mid in mids:
if mid == 0:
continue
df_model = self[self.mid == mid]
fields = pd.DataFrame(df_model["nflds"].tolist())
field_names = raw.get_mid2fields(self.db)[mid]
for field in field_names:
if prefix + field not in self.columns:
self[prefix + field] = ""
for ifield, field in enumerate(field_names):
# todo: can we speed this up?
self.loc[self.mid == mid, [prefix + field]] = pd.Series(
fields[ifield].tolist(),
index=self.loc[self.mid == mid].index,
)
self.drop("nflds", axis=1, inplace=True)
self._fields_format = "columns"
def fields_as_list(self, inplace=False, force=False):
"""
This reverts :meth:`.fields_as_columns`, all columns that represented
field contents are now merged into one column 'nflds'.
Args:
inplace: If False, return new dataframe, else update old one
force: Internal use
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
if not force:
self._check_our_format()
if not inplace:
df = self.copy(True)
df.fields_as_list(inplace=True, force=force)
return df
if self._fields_format == "list":
log.warning(
"Fields are already as list. Returning without doing anything."
)
return
elif self._fields_format == "in_progress" and not force:
raise ValueError(
"It looks like the last call to fields_as_list or"
"fields_as_columns was not successful, so you better start "
"over."
)
elif self._fields_format == "columns":
pass
else:
raise ValueError(f"Unknown _fields_format: {self._fields_format}")
self._fields_format = "in_progress"
mids = self.mid.unique()
to_drop = []
for mid in mids:
fields = raw.get_mid2fields(self.db)[mid]
fields = [self.fields_as_columns_prefix + field for field in fields]
self.loc[self.mid == mid, "nflds"] = pd.Series(
self.loc[self.mid == mid, fields].values.tolist(),
index=self.loc[self.mid == mid].index,
)
# Careful: Do not delete the fields here yet, other models
# might still use them
to_drop.extend(fields)
self.drop(to_drop, axis=1, inplace=True)
self._fields_format = "list"
# Quick access
# ==========================================================================
def _check_tag_col(self):
if "ntags" not in self.columns:
raise ValueError(
"Tag column 'ntags' doesn't exist. Perhaps you forgot to merge "
"the notes into your table?"
)
def list_tags(self) -> List[str]:
"""Return sorted list of all tags in the current table."""
if "ntags" not in self.columns:
raise ValueError(
"Tags column 'ntags' not present. Either use the notes table"
" or merge it into your table."
)
else:
return sorted(
{item for lst in self["ntags"].tolist() for item in lst}
)
def list_decks(self) -> List[str]:
"""Return sorted list of deck names in the current table."""
if "cdeck" not in self.columns:
raise ValueError(
"Deck column 'cdeck' not present. Either use the cards table "
"or merge it into your table."
)
else:
decks = sorted(self["cdeck"].unique())
if "" in decks:
decks.remove("")
return decks
def list_models(self):
"""Return sorted list of model names in the current table."""
if "nmodel" not in self.columns:
raise ValueError(
"Model column 'nmodel' not present. Either use the notes table"
" or merge it into your table."
)
return sorted(self["nmodel"].unique())
def has_tag(self, tags: Optional[Union[Iterable[str], str]] = None):
"""Checks whether row has a certain tag ('ntags' column).
Args:
tags: String or list thereof. In the latter case, True is returned
if the row contains any of the specified tags.
If None (default), True is returned if the row has any tag at
all.
Returns:
Boolean :class:`pd.Series`
Examples:
.. code-block:: python
# Get all tagged notes:
notes[notes.has_tag()]
# Get all untagged notes:
notes[~notes.has_tag()]
# Get all notes tagged Japanese:
japanese_notes = notes[notes.has_tag("Japanese")]
# Get all notes tagged either Japanese or Chinese:
asian_notes = notes[notes.has_tag(["Japanese", "Chinese"])]
"""
self._check_our_format()
self._check_tag_col()
if isinstance(tags, str):
tags = [tags]
if tags is not None:
def _has_tag(other):
return not set(tags).isdisjoint(other)
return self["ntags"].apply(_has_tag)
else:
return self["ntags"].apply(bool)
def has_tags(self, tags: Optional[Union[Iterable[str], str]] = None):
"""Checks whether row contains at least the supplied tags.
Args:
tags: String or list thereof.
If None (default), True is returned if the row has any tag at
all.
Returns:
Boolean :class:`pd.Series`
Examples:
.. code-block:: python
# Get all notes tagged BOTH Japanese AND Chinese
bilingual_notes = notes[notes.has_tags(["Japanese", "Chinese"])]
# Note the difference to
asian_notes = notes[notes.has_tag(["Japanese", "Chinese"])]
"""
self._check_our_format()
if tags is None:
return self.has_tag(None)
self._check_tag_col()
if isinstance(tags, str):
tags = [tags]
_has_tags = set(tags).issubset
return self["ntags"].apply(_has_tags)
def add_tag(self, tags: Union[Sequence[str], str], inplace=False):
"""Adds tag ('ntags' column).
Args:
tags: String or list thereof.
inplace: If False, return new dataframe, else update old one
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
self._check_our_format()
if not inplace:
df = self.copy(True)
df.add_tag(tags, inplace=True)
return df
self._check_tag_col()
if isinstance(tags, str):
tags = [tags]
if len(tags) == 0:
return
def _add_tags(other):
return other + sorted(set(tags) - set(other))
self["ntags"] = self["ntags"].apply(_add_tags)
def remove_tag(self, tags: Union[Iterable[str], str, None], inplace=False):
"""Removes tag ('ntags' column).
Args:
tags: String or list thereof. If None, all tags are removed.
inplace: If False, return new dataframe, else update old one
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
self._check_our_format()
if not inplace:
df = self.copy(True)
df.remove_tag(tags, inplace=True)
return df
self._check_tag_col()
if isinstance(tags, str):
tags = [tags]
if tags is not None:
def _remove_tags(other):
return [tag for tag in other if tag not in tags]
self["ntags"] = self["ntags"].apply(_remove_tags)
else:
self["ntags"] = self["ntags"].apply(lambda _: [])
# Compare
# ==========================================================================
def was_modified(
self, other: Optional[pd.DataFrame] = None, na=True, _force=False
):
"""Compare with original table, show which rows have changed.
Will only compare columns existing in both dataframes.
Args:
other: Compare with this :class:`pandas.DataFrame`.
If None (default), use original unmodified dataframe as reloaded
from the database.
na: Value to assign to rows that cannot be compared (e.g. newly added rows)
_force: internal use
Returns:
Boolean value for each row, showing if it was modified.
"""
if not _force:
self._check_our_format()
if other is None:
_other = self.col._get_original_item(
self._anki_table
) # type: AnkiDataFrame
else:
_other = other
del other # avoid confusion
self_sf = self
if self._fields_format == "columns":
self_sf = self.fields_as_list(inplace=False, force=_force)
cols = sorted(set(self_sf.columns) & set(_other.columns))
other_nids = set(_other.index)
inters = set(self_sf.index) & other_nids
result = pd.Series(na, index=self_sf.index)
new_bools = np.any(
_other.loc[_other.index.isin(inters), cols].values
!= self_sf.loc[self_sf.index.isin(inters), cols].values,
axis=1,
)
result.loc[self_sf.index.isin(inters)] = pd.Series(
new_bools, index=result[self_sf.index.isin(inters)].index
)
return result
def modified_columns(
self, other: Optional[pd.DataFrame] = None, _force=False, only=True
):
"""Compare with original table, show which columns in which rows
were modified.
Args:
other: Compare with this :class:`pandas.DataFrame`.
If None (default), use original unmodified dataframe as reloaded
from the database.
only: Only show rows where at least one column is changed.
_force: internal use
Returns:
Boolean :class:`pandas.DataFrame` with one row per compared row and one
column per compared column, showing which entries were modified.
"""
if other is None:
other = self.init_with_table(col=self.col, table=self._anki_table)
cols = [c for c in self.columns if c in other.columns]
other_nids = set(other.index)
inters = set(self.index) & other_nids
if only:
inters &= set(
self[self.was_modified(other=other, _force=_force)].index
)
inters_st = sorted(inters)
del inters
return pd.DataFrame(
self.loc[inters_st, cols].values
!= other.loc[inters_st, cols].values,
index=self.loc[inters_st].index,
columns=cols,
)
def was_added(self, other: Optional[pd.DataFrame] = None, _force=False):
"""Compare with original table, show which rows were added.
Args:
other: Compare with this :class:`pandas.DataFrame`.
If None (default), use original unmodified dataframe as reloaded
from the database.
_force: internal use
Returns:
Boolean value for each row, showing whether it was newly added, i.e.
whether it is absent from the compared table.
"""
if not _force:
self._check_our_format()
if other is not None:
other_ids = set(other.index)
else:
other_ids = set(self.col._get_original_item(self._anki_table).id)
new_indices = set(self.index) - other_ids
return self.index.isin(new_indices)
def was_deleted(
self, other: Optional[pd.DataFrame] = None, _force=False
) -> List:
"""Compare with original table, return deleted indizes.
Args:
other: Compare with this :class:`pandas.DataFrame`.
If None (default), use original unmodified dataframe as reloaded
from the database.
_force: internal use
Returns:
Sorted list of deleted indices.
"""
if not _force:
self._check_our_format()
if other is not None:
other_ids = set(other.index)
else:
other_ids = set(self.col._get_original_item(self._anki_table).id)
deleted_indices = other_ids - set(self.index)
return sorted(deleted_indices)
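# A minimal usage sketch of the comparison helpers, assuming `notes` was
# loaded from a collection and then edited; all comparisons are against the
# unmodified table as reloaded from the database:
edited = notes[notes.was_modified()]   # rows whose values changed
added = notes[notes.was_added()]       # rows not yet present in the database
deleted_ids = notes.was_deleted()      # sorted list of indices that were dropped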
# Update modification stamps and similar
# ==========================================================================
def _set_usn(self):
"""Update usn (update sequence number) for all changed rows."""
self.loc[
self.was_modified(na=True, _force=True),
_columns.columns_anki2ours[self._anki_table]["usn"],
] = -1
def _set_mod(self):
"""Update modification timestamps for all changed rows."""
if self._anki_table in ["cards", "notes"]:
self.loc[
self.was_modified(na=True, _force=True),
_columns.columns_anki2ours[self._anki_table]["mod"],
] = int(time.time())
# todo: test
def _set_guid(self):
    """Update globally unique id for notes that are still missing one."""
    if self._anki_table == "notes":
        # The original line discarded the result of .apply(), so nothing was
        # ever written back. Assumption (editor): generate_guid() returns one
        # new GUID per call.
        missing = ~self["nguid"].apply(bool)
        self.loc[missing, "nguid"] = [
            generate_guid() for _ in range(int(missing.sum()))
        ]
# Raw and normalized
# ==========================================================================
def normalize(self, inplace=False, force=False):
"""Bring a :class:`AnkiDataFrame` from the ``raw`` format (i.e. the
exact format that Anki uses in its internal representation) to our
convenient format.
Args:
inplace: If False, return new dataframe, else update old one
force: If a previous conversion fails, :meth:`normalize` will
refuse to attempt another one by default. Use this option
to force another attempt anyway.
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
if not inplace:
df = self.copy(True)
df.normalize(inplace=True, force=force)
return df
if not force:
self._check_df_format()
if self._df_format == "ours":
log.warning(
"Dataframe already is in our format. "
"Returning without doing anything."
)
return
table = self._anki_table
if table not in ["cards", "revs", "notes"]:
self._invalid_table()
self._df_format = "in_progress"
# Dtypes
# ------
for column, typ in _columns.dtype_casts[table].items():
self[column] = self[column].astype(typ)
# Renames
# -------
self.rename(columns=_columns.columns_anki2ours[table], inplace=True)
# Value maps
# ----------
# We sometimes interpret cryptic numeric values
if table in _columns.value_maps:
for column in _columns.value_maps[table]:
self[column] = self[column].map(
_columns.value_maps[table][column]
)
# IDs
# ---
id_field = _columns.table2index[table]
duplicate_ids = self[id_field][self[id_field].duplicated()].tolist()
if duplicate_ids:
log.critical(
"The following IDs occur "
"more than once: %s. Please do not use this dataframe.",
", ".join(map(str, duplicate_ids)),
)
self.set_index(id_field, inplace=True)
if table == "cards":
self["cdeck"] = self["did"].map(raw.get_did2deck(self.db))
self["codeck"] = self["codid"].map(raw.get_did2deck(self.db))
elif table == "notes":
self["nmodel"] = self["mid"].map(raw.get_mid2model(self.db))
# Tags
# ----
if table == "notes":
# Tags as list, rather than string joined by space
self["ntags"] = self["ntags"].apply(
lambda joined: [item for item in joined.split(" ") if item]
)
# Fields
# ------
if table == "notes":
# Fields as list, rather than as string joined by \x1f
self["nflds"] = self["nflds"].str.split("\x1f")
self._fields_format = "list"
# Drop columns
# ------------
drop_columns = set(self.columns) - set(_columns.our_columns[table])
self.drop(drop_columns, axis=1, inplace=True)
self.check_table_integrity()
self._df_format = "ours"
def raw(self, inplace=False, force=False):
"""Bring a :class:`AnkiDataFrame` into the ``raw`` format (i.e. the
exact format that Anki uses in its internal representation) .
Args:
inplace: If False, return new dataframe, else update old one
force: If a previous conversion fails, :meth:`raw` will
refuse to attempt another one by default. Use this option
to force another attempt anyway.
Returns:
New :class:`AnkiDataFrame` if ``inplace==False``, else None
"""
if not inplace:
df = self.copy(True)
df.raw(inplace=True, force=force)
return df
if not force:
self._check_df_format()
if self._df_format == "anki":
log.warning(
"Dataframe already is in Anki format. "
"Returning without doing anything."
)
return
table = self._anki_table
if table not in ["revs", "cards", "notes"]:
self._invalid_table()
self._df_format = "in_progress"
# Note: Here we pretty much go through self.normalize() and revert
# every single step.
# Update automatic fields
# -----------------------
self._set_mod()
self._set_usn()
self._set_guid()
# IDs
# ---
# Index as column:
self.reset_index(inplace=True, drop=False)
if table == "cards":
self["did"] = self["cdeck"].map(raw.get_deck2did(self.db))
self["odid"] = self["codeck"].map(raw.get_deck2did(self.db))
if table == "notes":
self["mid"] = self["nmodel"].map(raw.get_model2mid(self.db))
# Fields & Hashes
# ---------------
if table == "notes":
if not self._fields_format == "list":
self.fields_as_list(inplace=True, force=True)
# Check if success
if not self._fields_format == "list":
raise ValueError(
"It looks like the last call to fields_as_list or"
"fields_as_columns was not successful, so you better start "
"over."
)
# Restore the sort field.
mids = list(self["mid"].unique())
mid2sfld = raw.get_mid2sortfield(self.db)
for mid in mids:
sfield = mid2sfld[mid]
df_model = self[self["mid"] == mid]
fields = pd.DataFrame(df_model["nflds"].tolist())
self.loc[self["mid"] == mid, "nsfld"] = fields[sfield].tolist()
self["ncsum"] = self["nflds"].apply(
lambda lst: field_checksum(lst[0])
)
self["nflds"] = self["nflds"].str.join("\x1f")
# Tags
# ----
if table == "notes" and "nflds" in self.columns:
self["ntags"] = self["ntags"].str.join(" ")
# Value Maps
# ----------
if table in _columns.value_maps:
for column in _columns.value_maps[table]:
if column not in self.columns:
continue
self[column] = self[column].map(
invert_dict(_columns.value_maps[table][column])
)
# Renames
# -------
self.rename(
columns=invert_dict(_columns.columns_anki2ours[table]), inplace=True
)
self.rename(columns={"index": "id"}, inplace=True)
# Dtypes
# ------
for column, typ in _columns.dtype_casts_back[table].items():
self[column] = self[column].astype(typ)
# Unused columns
# --------------
if table in ["cards", "notes"]:
self["data"] = ""
self["flags"] = 0
# Drop and Rearrange
# ------------------
# Todo: warn about dropped columns?
if len(self) == 0:
new = pd.DataFrame(columns=_columns.anki_columns[table])
else:
new = pd.DataFrame(self[_columns.anki_columns[table]])
self.drop(self.columns, axis=1, inplace=True)
for col in new.columns:
self[col] = new[col]
self.check_table_integrity()
self._df_format = "anki"
# Write
# ==========================================================================
def summarize_changes(self, output="print") -> Optional[dict]:
"""Summarize changes that were made with respect to the table
as loaded from the database.
Args:
output: Output mode: 'print' (default: print)
or 'dict' (return as dictionary)
Returns:
None or dictionary
"""
as_dict = {
"n": len(self),
"n_modified": sum(self.was_modified(na=False)),
"n_added": sum(self.was_added()),
"n_deleted": len(self.was_deleted()),
}
as_dict["has_changed"] = (
as_dict["n_modified"] or as_dict["n_added"] or as_dict["n_deleted"]
)
if output == "print":
print("Total rows: {}".format(as_dict["n"]))
print("Compared to original version:")
print("Modified rows: {}".format(as_dict["n_modified"]))
print("Added rows: {}".format(as_dict["n_added"]))
print("Deleted rows: {}".format(as_dict["n_deleted"]))
return None # make explicit for mypy
elif output == "dict":
return as_dict
else:
raise ValueError(f"Invalid output setting: {output}")
# Append
# ==========================================================================
def _get_id(self, others=()) -> int:
"""Generate ID from timestamp and increment if it is already in use.
.. warning::
Do not call repeatedly without adding new IDs to index (might
produce identical IDs). Rather use :meth:`_get_ids` instead.
"""
idx = int(1000 * time.time())
while idx in self.index or idx in others:
idx += 1
return idx
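# A standalone sketch of the ID scheme used above (millisecond timestamps,
# incremented until they avoid all known IDs); independent of this class:
def _next_free_id(taken):
    import time as _time  # local import keeps the sketch self-contained
    candidate = int(1000 * _time.time())
    while candidate in taken:
        candidate += 1
    return candidate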
# todo: documentation
def add_card(
self,
nid: int,
cdeck: str,
cord: Optional[Union[int, List[int]]] = None,
cmod: Optional[int] = None,
cusn: Optional[int] = None,
cqueue: Optional[str] = None,
ctype: Optional[str] = None,
civl: Optional[int] = None,
cfactor: Optional[int] = None,
creps: Optional[int] = None,
clapses: Optional[int] = None,
cleft: Optional[int] = None,
cdue: Optional[int] = None,
inplace=False,
):
"""
Similar to :py:meth:`ankipandas.ankidf.AnkiDataFrame.add_cards`, but adds a single card.
Args:
nid:
cdeck:
cord:
cmod:
cusn:
cqueue:
ctype:
civl:
cfactor:
creps:
clapses:
cleft:
cdue:
inplace:
Returns:
"""
return self.add_cards(
nid=[nid],
cdeck=cdeck,
cord=cord,
cmod=cmod,
cusn=cusn,
cqueue=cqueue,
ctype=ctype,
civl=civl,
cfactor=cfactor,
creps=creps,
clapses=clapses,
cleft=cleft,
cdue=cdue,
inplace=inplace,
)
# todo: change order of arguments?
# fixme: cord will be replaced
# todo: duplicate cards (same note, same cord)?
# fixme: This is an absolute mess with the signature and mypy...
def add_cards(
self,
nid: List[int],
cdeck: Union[str, List[str]],
cord: Optional[Union[int, List[int]]] = None,
cmod: Optional[Union[int, List[int]]] = None,
cusn: Optional[Union[int, List[int]]] = None,
cqueue: Optional[Union[str, List[str]]] = None,
ctype: Optional[Union[str, List[str]]] = None,
civl: Optional[Union[int, List[int]]] = None,
cfactor: Optional[Union[int, List[int]]] = None,
creps: Optional[Union[int, List[int]]] = None,
clapses: Optional[Union[int, List[int]]] = None,
cleft: Optional[Union[int, List[int]]] = None,
cdue: Optional[Union[int, List[int]]] = None,
inplace=False,
):
"""
Add cards belonging to notes of one model.
Args:
nid: Note IDs of the notes that you want to add cards for
cdeck: Name of deck to add cards to as string or list of strings
(different deck for each nid).
cord: Number of the template to add cards for as int or list
thereof. The template corresponds to the reviewing
direction. If left ``None`` (default), cards for all
templates will be added.
It is not possible to specify different cord for different
nids!
cmod: List of modification timestamps.
Will be set automatically if ``None`` (default) and it is
discouraged to set your own.
cusn: List of Update Sequence Numbers.
Will be set automatically (to -1, i.e. needs update)
if ``None`` (default) and it is
very discouraged to set your own.
cqueue: 'sched buried', 'user buried', 'suspended', 'new',
'learning', 'due', 'in learning' (learning but next rev at
least a day after the previous review). If ``None`` (default),
'new' is chosen for all cards. Specify as string or list
thereof.
ctype: List of card types ('learning', 'review', 'relearn', 'cram').
If ``None`` (default) 'learning' is chosen for all.
civl: The new interval that the card was pushed to after the review.
Positive values are in days, negative values are in seconds
(for learning cards). If ``None`` (default) 0 is chosen for
all cards.
cfactor: The new ease factor of the card in permille. If ``None``
(default) 0 is chosen for all.
creps: Number of reviews. If ``None`` (default), 0 is chosen for
all cards.
clapses: The number of times the card went from a 'was answered
correctly' to 'was answered incorrectly'. If ``None`` (default),
0 is chosen for all cards.
cleft: Of the form ``a*1000+b``, with: ``b`` the number of reps
left till graduation and ``a`` the number of reps left today.
If ``None`` (default), 0 is chosen for all cards.
cdue: Due is used differently for different card types: new:
note id or random int, due: integer day, relative to the
collection's creation time, learning: integer timestamp.
If ``None`` (default), check that we're adding a new card and
set to note ID.
inplace: If ``False`` (default), return a new
:class:`~ankipandas.AnkiDataFrame`, if True, modify in place and
return new card IDs
Returns:
:class:`~ankipandas.AnkiDataFrame` if ``inplace==False``, else
list of new card IDs
"""
self._check_our_format()
if not self._anki_table == "cards":
raise ValueError("Cards can only be added to cards table!")
# --- Ord ---
nid2mid = raw.get_nid2mid(self.db)
missing_nids = sorted(set(nid) - set(nid2mid))
if missing_nids:
raise ValueError(
"The following note IDs (nid) can't be found in the notes "
"table: {}. Perhaps you didn't call notes.write() to write "
"them back into the database?".format(
", ".join(map(str, missing_nids))
)
)
mids = {nid2mid[x] for x in nid}
if len(mids) >= 2:
raise ValueError(
"It is only supported to add cards for notes of the same model"
", but you're trying to add cards for notes of "
"models: {}".format(", ".join(map(str, mids)))
)
mid = mids.pop()
# fixme: should use function from ankipandas.raw
available_ords = raw.get_mid2templateords(self.db)[mid]
if cord is None:
cord = available_ords
elif isinstance(cord, int):
cord = [cord]
elif is_list_like(cord):
pass
else:
raise ValueError(
"Unknown type for cord specifiation: {}".format(type(cord))
)
not_available = sorted(set(cord) - set(available_ords))
if not_available:
raise ValueError(
"The following templates are not available for notes of "
"this model: {}".format(", ".join(map(str, not_available)))
)
# --- Deck ---
if isinstance(cdeck, str):
cdeck = [cdeck] * len(nid)
elif is_list_like(cdeck):
if len(cdeck) != len(nid):
raise ValueError(
"Number of decks doesn't match number of "
"notes for which cards should be added: {} "
"instead of {}.".format(len(cdeck), len(nid))
)
else:
raise ValueError("Unknown format for cdeck: {}".format(type(cdeck)))
unknown_decks = sorted(
set(cdeck) - set(raw.get_did2deck(self.db).values())
)
if unknown_decks:
raise ValueError(
"The following decks do not seem to exist: {}".format(
", ".join(unknown_decks)
)
)
# --- Rest ---
def _handle_input(inpt, name, default, typ, options=None) -> List[Any]:
if inpt is None:
inpt = [default] * len(nid)
elif is_list_like(inpt):
if len(inpt) != len(nid):
raise ValueError(
"Number of {} doesn't match number of "
"notes for which cards should be added: {} "
"instead of {}.".format(name, len(inpt), len(nid))
)
elif isinstance(inpt, typ):
inpt = [inpt] * len(nid)
else:
raise ValueError(
"Invalid type of {} specification: {}".format(
name, type(inpt)
)
)
if options is not None:
invalid = sorted(set(inpt) - set(options))
if invalid:
raise ValueError(
"The following values are no valid "
"entries for {}: {}".format(name, ", ".join(invalid))
)
return inpt
cmod = _handle_input(cmod, "cmod", int(time.time()), int)
cusn = _handle_input(cusn, "cusn", -1, int)
cqueue = _handle_input(
cqueue,
"cqueue",
"new",
str,
options=[
"sched buried",
"user buried",
"suspended",
"new",
"learning",
"due",
"in learning",
],
)
ctype = _handle_input(
ctype,
"ctype",
"learning",
str,
options=["learning", "review", "relearn", "cram"],
)
civl = _handle_input(civl, "civl", 0, int)
cfactor = _handle_input(cfactor, "cfactor", 0, int)
creps = _handle_input(creps, "creps", 0, int)
clapses = _handle_input(clapses, "clapses", 0, int)
cleft = _handle_input(cleft, "cleft", 0, int)
# --- Due ---
# Careful: Has to come after cqueue is defined!
if cdue is None:
if set(cqueue) == {"new"}:
cdue = nid
else:
raise ValueError(
"Due date can only be set automatically for cards of type"
"/queue 'new', but you have types: {}".format(
", ".join(set(cqueue))
)
)
elif is_list_like(cdue):
if len(cdue) != len(nid): # type: ignore
raise ValueError(
"Number of cdue doesn't match number of "
"notes for which cards should be added: {} "
"instead of {}.".format(len(cdue), len(nid)) # type: ignore
)
elif isinstance(cdue, int):
cdue = [cdue] * len(nid)
else:
raise ValueError(
"Invalid type of cdue specification: {}".format(type(cdue))
)
# Now we need to decide on contents for EVERY column in the DF
all_cids = self._get_ids(n=len(nid) * len(cord))
add = pd.DataFrame(columns=self.columns, index=all_cids)
import time
import numpy as np
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
URLS_PATH = "./data/urls_transferwise.csv"
CHROMEDRIVER_PATH = "./drivers/chromedriver"
# connect to chrome webdriver
options = Options()
options.add_argument('--headless')
# options.add_argument('--disable-gpu') # Last I checked this was necessary.
driver = webdriver.Chrome(CHROMEDRIVER_PATH, options=options)
# read list of URLs to scrape
urls = pd.read_csv(URLS_PATH)
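# A sketch of how the scraping loop might continue; the 'url' column name
# and the output path are assumptions, not taken from the snippet above:
pages = []
for url in urls['url']:
    driver.get(url)
    time.sleep(2)  # crude wait for the headless page to render
    pages.append({'url': url, 'html': driver.page_source})
driver.quit()
pd.DataFrame(pages).to_csv('./data/pages_raw.csv', index=False)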
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import shapefile
# import finoa
# import shapely
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
# from pyproj import Proj, transform
# import stateplane
# from datetime import datetime
import pickle
# import multiprocessing as mp
import gc
# In[2]:
import shapefile
import geopandas as gpd
# # Create crash table
# In[20]:
def create_crash_table(year='15'):
crash_add_gra_final = pd.read_pickle('/home/andy/Documents/sync/PITA_new/Data/crash_' + year + '_keypoint.pkl')
crash_add_gra_final['keplist_0x'] = [i[0] if type(i) == list else -1 for i in
crash_add_gra_final.PennShkeyplist_grav.values]
crash_add_gra_final['keplist_0y'] = [i[1] if type(i) == list else -1 for i in
crash_add_gra_final.PennShkeyplist_grav.values]
crash_14 = pd.read_csv("../crashes/crash20" + year + "/CRASH.txt", low_memory=False)
import numpy as np
#import scipy.io #required to read Matlab *.mat file
from scipy import linalg
import pandas as pd
import networkx as nx
#import pickle
import itertools
from sklearn.covariance import GraphLassoCV, ledoit_wolf, graph_lasso
from statsmodels.stats.correlation_tools import cov_nearest
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import json
import glob
def X_from_excel(file, gmax=5):
""" Reads pedigree data and outputs a balanced table,
where rows are pedigrees and columns are lineal positions.
gmax is the maximum allowed number of generations"""
X_raw = pd.read_excel(file, index_col=[0, 1])
import os
import networkx as nx
import matplotlib.pyplot as plt
import geopandas as gpd
import pandas as pd
import numpy as np
import functools
import operator
import shapely.affinity
from shapely.ops import split
from shapely.geometry import Point, LineString, MultiLineString, GeometryCollection, Polygon
from math import degrees, atan2, acos, degrees
import warnings
import rasterio
from . import m2l_utils
from .m2l_utils import display, print
class Topology(object):
def __init__(self):
pass
def save_parfile(self, c_l, output_path, geology_file_csv, fault_file_csv, structure_file_csv, mindep_file_csv, minx, maxx, miny, maxy, deposit_dist, commodities):
with open('Parfile', 'w') as f:
f.write(
'--- COLUMN NAMES IN CSV DATA FILES: -------------------------------------------------------------\n')
f.write('OBJECT COORDINATES =WKT\n')
f.write('FAULT: ID ='+c_l['o']+'\n')
f.write('FAULT: FEATURE ='+c_l['f']+'\n')
f.write('POLYGON: ID ='+c_l['o']+'\n')
f.write('POLYGON: LEVEL1 NAME ='+c_l['u']+'\n')
f.write('POLYGON: LEVEL2 NAME ='+c_l['g']+'\n')
f.write('POLYGON: MIN AGE ='+c_l['min']+'\n')
f.write('POLYGON: MAX AGE ='+c_l['max']+'\n')
f.write('POLYGON: CODE ='+c_l['c']+'\n')
f.write('POLYGON: DESCRIPTION ='+c_l['ds']+'\n')
f.write('POLYGON: ROCKTYPE1 ='+c_l['r1']+'\n')
f.write('POLYGON: ROCKTYPE2 ='+c_l['r2']+'\n')
f.write('DEPOSIT: SITE CODE ='+c_l['msc']+'\n')
f.write('DEPOSIT: SITE TYPE ='+c_l['mst']+'\n')
f.write('DEPOSIT: SITE COMMODITY ='+c_l['mscm']+'\n')
f.write(
'--- SOME CONSTANTS: ----------------------------------------------------------------------------\n')
f.write('FAULT AXIAL FEATURE NAME ='+c_l['fold']+'\n')
f.write('SILL UNIT DESCRIPTION CONTAINS ='+c_l['sill']+'\n')
f.write(
'IGNEOUS ROCKTYPE CONTAINS ='+c_l['intrusive']+'\n')
f.write(
'VOLCANIC ROCKTYPE CONTAINS ='+c_l['volcanic']+'\n')
f.write(
'IGNORE DEPOSITS WITH SITE TYPE =Infrastructure\n')
f.write('Intersect Contact With Fault: angle epsilon (deg) =1.0\n')
f.write('Intersect Contact With Fault: distance epsilon (m) =15.0\n')
f.write('Distance buffer (fault stops on another fault) (m) =20.0\n')
f.write(
'Distance buffer (point on contact) (m) ='+str(deposit_dist)+'\n')
f.write('Intersect polygons distance buffer (for bad maps) =3.\n')
f.write(
'------------------------------------------------------------------------------------------------\n')
f.write(
'Path to the output data folder ='+output_path+'\n')
f.write(
'Path to geology data file ='+geology_file_csv+'\n')
f.write(
'Path to faults data file ='+fault_file_csv+'\n')
f.write(
'Path to mineral deposits data file ='+mindep_file_csv+'\n')
f.write(
'------------------------------------------------------------------------------------------------\n')
f.write('Clipping window X1 Y1 X2 Y2 (zeros for infinite) =' +
str(minx)+' '+str(miny)+' '+str(maxx)+' '+str(maxy)+'\n')
f.write('Min length fraction for strat/fault graphs =0.0\n')
f.write(
'Graph edge width categories (three doubles) =2000. 20000. 200000.\n')
f.write(
'Graph edge direction (0-min age, 1-max age, 2-avg) ='+str(2)+'\n')
f.write(
'Deposit names for adding info on the graph ='+commodities+'\n')
f.write('Partial graph polygon ID =32\n')
f.write('Partial graph depth =4\n')
f.write(
'Map subregion size dx, dy [m] (zeros for full map) =0. 0.\n')
f.write(
'------------------------------------------------------------------------------------------------\n')
####################################
# parse stratigraphy GML file to get number of series and series names
#
# get_series(path_in,id_label)
# Args:
# path_in path to gml file
# id_label code for each node that defines node type (group or formation)
####################################
def get_series(path_in, id_label):
# load a stratigraphy with groups (no longer true?: needs to feed via yed!!)
G = nx.read_gml(path_in, label=id_label)
glabels = {}
groups = 0
nlist = list(G.nodes)
for n in nlist: # Find out total number of groups and their names
if('isGroup' in G.nodes[n]):
groups += 1
glabels[n] = G.nodes[n]['LabelGraphics']['text'].replace(
" ", "_").replace("-", "_")
return(groups, glabels, G)
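# A minimal usage sketch; the GML path and the 'id' node label below are
# placeholders for whatever the upstream map2model step produced:
n_groups, group_labels, strat_graph = Topology.get_series('graph/graph_strat.gml', 'id')
print(n_groups, 'groups found:', sorted(group_labels.values()))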
####################################
# parse stratigraphy GML file to save units for each series
#
# Saves one csv file per group, listing that group's units in a possible
# stratigraphic order.
# save_units(G, path_out, glabels, Australia, asud_strat_file, quiet)
# Args:
# G networkx format stratigraphy graph
# path_out directory of temporary outputs
# glabels list of group names
#
# The choice of what constitutes a basic unit and what constitutes a group of units is defined in the c_l codes. Not even sure we need two levels but it seemed like a good idea at the time. Text outputs list alternate topologies for series and surfaces, which if confirmed by comparing max-min ages will be a nice source of uncertainty.
####################################
def save_units(G, path_out, glabels, Australia, asud_strat_file, quiet):
if Australia:
ASUD = pd.read_csv(asud_strat_file, ',')
for p in glabels: # process each group, removing nodes that are not part of that group, and other groups
GD = G.copy() # temporary copy of full graph
# print()
#print(p,glabels[p].replace(" ","_").replace("-","_"),"----------------------")
nlist = list(G.nodes)
for n in nlist: # Calculate total number of groups and their names
if('gid' in GD.nodes[n]): # normal node
if(GD.nodes[n]['gid'] != p): # normal node but not part of current group
GD.remove_node(n)
else: # group node
GD.remove_node(n)
labels = {}
for node in GD.nodes(): # local store of node labels
labels[node] = G.nodes[node]['LabelGraphics']['text'].replace(
" ", "_").replace("-", "_").replace("?", "_")
cycles = nx.simple_cycles(GD)
for cy in cycles:
found = False
if Australia:
len_cy = len(cy)
for i in range(len_cy-1):
glabel_0 = GD.nodes[cy[i]]['LabelGraphics']['text']
glabel_1 = GD.nodes[cy[i+1]]['LabelGraphics']['text']
edge = ASUD.loc[(ASUD['over'] == glabel_0) & (
ASUD['under'] == glabel_1)]
if(len(edge) == 0 and (not('isGroup' in GD.nodes[cy[i]]) and not('isGroup' in GD.nodes[cy[i+1]]))):
if(not GD.has_edge(cy[i], cy[i+1])):
continue
else:
warning_msg = 'map2loop warning 1: Stratigraphic relationship: ' + \
str(GD.nodes[cy[i]]['LabelGraphics']['text'])+' overlies '+str(
GD.nodes[cy[i+1]]['LabelGraphics']['text'])+' removed to prevent cycle'
warnings.warn(warning_msg)
GD.remove_edge(cy[i], cy[i+1])
found = True
if(not found):
glabel_0 = GD.nodes[cy[len_cy-1]
]['LabelGraphics']['text']
glabel_1 = GD.nodes[cy[0]]['LabelGraphics']['text']
edge = ASUD.loc[(ASUD['over'] == glabel_0) & (
ASUD['under'] == glabel_1)]
if(len(edge) == 0 and (not('isGroup' in GD.nodes[cy[len_cy-1]]) and not('isGroup' in GD.nodes[cy[0]]))):
if(GD.has_edge(cy[len_cy-1], cy[0])):
warning_msg = 'map2loop warning 1: Stratigraphic relationship: ' + \
str(GD.nodes[cy[len_cy-1]]['LabelGraphics']['text'])+' overlies '+str(
GD.nodes[cy[0]]['LabelGraphics']['text'])+' removed to prevent cycle'
warnings.warn(warning_msg)
GD.remove_edge(cy[len_cy-1], cy[0])
found = True
if(not found):
warning_msg = 'map2loop warning 2: Stratigraphic relationship: ' + \
str(GD.nodes[cy[0]]['LabelGraphics']['text'])+' overlies '+str(
GD.nodes[cy[1]]['LabelGraphics']['text'])+' removed to prevent cycle'
warnings.warn(warning_msg)
if(GD.has_edge(cy[len_cy-1], cy[0])):
if GD.has_edge(cy[0], cy[1]):
GD.remove_edge(cy[0], cy[1])
else:
warning_msg = 'map2loop warning 3: Stratigraphic relationship: ' + \
str(GD.nodes[cy[0]]['LabelGraphics']['text'])+' overlies '+str(
GD.nodes[cy[1]]['LabelGraphics']['text'])+' removed to prevent cycle'
warnings.warn(warning_msg)
if GD.has_edge(cy[0], cy[1]):
GD.remove_edge(cy[0], cy[1])
if not quiet:
plt.figure(p+1) # display strat graph for one group
plt.title(glabels[p])
plt.tight_layout()
nx.draw_networkx(GD, pos=nx.kamada_kawai_layout(
GD), arrows=True, with_labels=False)
nx.draw_networkx_labels(GD, pos=nx.kamada_kawai_layout(
GD), labels=labels, font_size=12, font_family='sans-serif')
one = True
if(one):
# one possible sorted directional graphs
nlist = list(nx.topological_sort(GD))
else:
# all possible sorted directional graphs
nlist = list(nx.all_topological_sorts(GD))
f = open(
os.path.join(path_out, glabels[p].replace(" ", "_").replace("-", "_").replace("?", "_")+'.csv'), 'w')
if(one):
f.write('Choice '+str(0))
for n in range(0, len(GD)): # display nodes for one sorted graph
f.write(","+G.nodes[nlist[n]]['LabelGraphics']
['text'].replace(" ", "_").replace("-", "_").replace("?", "_"))
f.write('\n')
else:
for m in range(10): # process first ten sorted graphs
f.write('Choice '+str(m))
for n in range(0, len(GD)): # display nodes for one sorted graph
#print(nlist[m][n],G.nodes[nlist[m][n]]['LabelGraphics']['text'].replace(" ","_").replace("-","_"))
f.write(","+G.nodes[nlist[m][n]]['LabelGraphics']
['text'].replace(" ", "_").replace("-", "_").replace("?", "_"))
# if(m<len(nlist)-1):
# print("....")
f.write('\n')
f.close()
####################################
# save out a list of max/min/ave ages of all formations in a group
#
# abs_age_groups(geol,tmp_path,c_l)
# Args:
# geol geopandas GeoDataFrame of geology polygons
# c_l dictionary of codes and labels specific to input geo information layers
#
# Save out csv of maximum and minimum ages of formations within each group so that groups can be better sorted.
####################################
def abs_age_groups(geol, tmp_path, c_l):
groups = []
info = []
ages = []
for indx, a_poly in geol.iterrows(): # loop through all polygons
if(str(a_poly[c_l['g']]) == 'None'):
grp = a_poly[c_l['c']].replace(" ", "_").replace("-", "_").replace("?", "_")
else:
grp = a_poly[c_l['g']].replace(" ", "_").replace("-", "_").replace("?", "_")
# print(grp)
if(not grp in groups):
groups += [(grp)]
info += [(grp, a_poly[c_l['min']], a_poly[c_l['max']])]
# display(info)
# display(groups)
for j in range(0, len(groups)):
# print(groups[j],'------------------')
min_age = 1e10
max_age = 0
for i in range(0, len(info)):
if(info[i][0] == groups[j]):
if(float(info[i][1]) < min_age):
min_age = float(info[i][1])
min_ind = i
if(float(info[i][2]) > max_age):
max_age = float(info[i][2])
max_ind = i
# print(groups[j],min_age,max_age,(max_age+min_age)/2)
ages += [(groups[j], min_age, max_age, (max_age+min_age)/2)]
# print()
# for j in range(0,len(ages)):
# print(ages[j][0],ages[j][1],ages[j][2],ages[j][3],sep='\t')
# print()
slist = sorted(ages, key=lambda l: l[3])
f = open(os.path.join(tmp_path, 'age_sorted_groups.csv'), 'w')
f.write('index,group_,min,max,ave\n')
for j in range(0, len(slist)):
f.write(str(j)+','+slist[j][0]+','+str(slist[j][1]) +
','+str(slist[j][2])+','+str(slist[j][3])+'\n')
f.close()
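# The same min/max/average bookkeeping can be sketched as a pandas groupby
# (assuming the same `geol` GeoDataFrame and `c_l` keys as above; the
# cleaning of group-name strings is omitted for brevity):
grp = geol[c_l['g']].astype(str)
key = grp.where(grp != 'None', geol[c_l['c']])
ages = geol.assign(group_=key).groupby('group_').agg(
    min=(c_l['min'], lambda s: s.astype(float).min()),
    max=(c_l['max'], lambda s: s.astype(float).max()),
)
ages['ave'] = (ages['min'] + ages['max']) / 2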
####################################
# save out tables of groups and sorted formation data
#
# save_group(G,tmp_path,glabels,geol_clip,c_l)
# G networkx format stratigraphy graph
# tmp_path directory of temporary outputs glabels list of group names
# geol_clip path to clipped geology layer c_l dictionary of codes and labels specific to input geo information layers
#
# Takes stratigraphy graph created by map2model c++ code to generate list of groups found in the region of interest
# Uses the first of each possible set of topologies per unit and per group, which is arbitrary. On the other hand we are not checking relative ages again to see if this helps reduce ambiguity, which I think it would.
####################################
def save_group(self, G, path_out, glabels, geol, c_l, quiet):
Gp = nx.Graph().to_directed() # New Group graph
geology_file = gpd.read_file(os.path.join(path_out, 'geol_clip.shp'))
self.abs_age_groups(geol, path_out, c_l)
geology_file.drop_duplicates(subset=c_l['c'], inplace=True)
geology_file.set_index(c_l['c'], inplace=True)
# display(geology_file)
gp_ages = pd.read_csv(os.path.join(path_out, 'age_sorted_groups.csv'))
# display(gp_ages)
gp_ages.set_index('group_', inplace=True)
if not quiet:
display(gp_ages)
gp_ids = []
nlist = list(G.nodes)
for n in nlist: # Find out total number of groups and their names
if('isGroup' in G.nodes[n]):
Gp.add_nodes_from([n])
if not quiet:
display(gp_ids)
for e in G.edges:
if(G.nodes[e[0]]['gid'] != G.nodes[e[1]]['gid']):
glabel_0 = G.nodes[e[0]]['LabelGraphics']['text']
glabel_1 = G.nodes[e[1]]['LabelGraphics']['text']
# print(glabel_0, glabel_1)
# print(df[df.CODE == "A-FOh-xs-f"])
# exit()
if(str(geology_file.loc[glabel_0][c_l['g']]) == 'None'):
grp0 = glabel_0.replace(" ", "_").replace("-", "_").replace("?", "_")
else:
grp0 = geology_file.loc[glabel_0][c_l['g']].replace(
" ", "_").replace("-", "_")
if(str(geology_file.loc[glabel_1][c_l['g']]) == 'None'):
grp1 = glabel_1.replace(" ", "_").replace("-", "_").replace("?", "_")
else:
grp1 = geology_file.loc[glabel_1][c_l['g']].replace(
" ", "_").replace("-", "_").replace("?", "_")
# print(glabel_0,glabel_1,gp_ages.loc[grp0],gp_ages.loc[grp1])
if(grp0 in glabels.values() and grp1 in glabels.values()):
if(gp_ages.loc[grp0]['ave'] < gp_ages.loc[grp1]['ave']):
Gp.add_edge(G.nodes[e[0]]['gid'], G.nodes[e[1]]['gid'])
GpD = Gp.copy() # temporary copy of full graph
GpD2 = Gp.copy() # temporary copy of full graph
for e in GpD2.edges: # remove duplicate edges with opposite directions
for f in GpD.edges:
# arbitrary choice to ensure edge is not completely removed
if(e[0] == f[1] and e[1] == f[0] and e[0] < f[0]):
Gp.remove_edge(e[0], e[1])
if not quiet:
display(glabels)
plt.figure(1) # display strat graph for one group
plt.title("groups")
if(len(glabels) > 1):
nx.draw_networkx(Gp, pos=nx.kamada_kawai_layout(
Gp), arrows=True, with_labels=False)
nx.draw_networkx_labels(Gp, pos=nx.kamada_kawai_layout(
Gp), font_size=12, font_family='sans-serif')
# when lots of groups very large number of possibilities so short cut to first choice.
if(len(gp_ages) > 10):
# all possible sorted directional graphs
glist = list(nx.topological_sort(Gp))
print("group choices: 1 (more than 10 groups)")
f = open(os.path.join(path_out, 'groups.csv'), 'w')
glen = len(glist)
f.write('Choice 0')
for n in range(0, glen):
f.write(','+str(glabels[glist[n]])) # check underscore
f.write('\n')
f.close()
else:
# all possible sorted directional graphs
glist = list(nx.all_topological_sorts(Gp))
print("group choices:", len(glist))
f = open(os.path.join(path_out, 'groups.csv'), 'w')
glen = len(glist)
if(glen > 100):
glen = 100
for n in range(0, glen):
f.write('Choice '+str(n))
for m in range(0, len(glist[0])):
f.write(','+str(glabels[glist[n][m]])) # check underscore
f.write('\n')
f.close()
# display(glist)
nx.write_gml(Gp, os.path.join(path_out, 'groups.gml'))
# plt.show()
# g=open(os.path.join(path_out,'groups.csv'),"r")
#contents =g.readlines()
# g.close
#hdr=contents[0].split(" ")
contents = np.genfromtxt(
os.path.join(path_out, 'groups.csv'), delimiter=',', dtype='U100')
# display('lencon',len(contents[0]))
k = 0
ag = open(os.path.join(path_out, 'all_sorts.csv'), "w")
ag.write("index,group number,index in group,number in group,code,group\n")
if(len(contents.shape) == 1):
for i in range(1, len(contents)):
ucontents = np.genfromtxt(os.path.join(path_out, contents[i].replace(
"\n", "").replace(" ", "_")+".csv"), delimiter=',', dtype='U100')
# f=open(os.path.join(path_out, contents[i].replace("\n","").replace(" ","_")+".csv"),"r")#check underscore
#ucontents =f.readlines()
# f.close
# print(len(ucontents.shape),ucontents)
if(len(ucontents.shape) == 1):
for j in range(1, len(ucontents)):
ag.write(str(k)+","+str(i)+","+str(j)+","+str(len(ucontents)-1)+","+ucontents[j].replace(
"\n", "")+","+contents[i].replace("\n", "").replace(" ", "_").replace("-", "_")+"\n")
k = k+1
else:
for j in range(1, len(ucontents[0])):
ag.write(str(k)+","+str(i)+","+str(j)+","+str(len(ucontents[0])-1)+","+ucontents[0][j].replace(
"\n", "")+","+contents[i].replace("\n", "").replace(" ", "_").replace("-", "_")+"\n")
k = k+1
else:
for i in range(1, len(contents[0])):
ucontents = np.genfromtxt(os.path.join(path_out, contents[0][i].replace(
"\n", "").replace(" ", "_")+".csv"), delimiter=',', dtype='U100')
# f=open(os.path.join(path_out,contents[i].replace("\n","").replace(" ","_")+".csv"),"r")#check underscore
#ucontents =f.readlines()
# f.close
# print(len(ucontents.shape),ucontents)
if(len(ucontents.shape) == 1):
for j in range(1, len(ucontents)):
ag.write(str(k)+","+str(i)+","+str(j)+","+str(len(ucontents)-1)+","+ucontents[j].replace(
"\n", "")+","+contents[0][i].replace("\n", "").replace(" ", "_").replace("-", "_")+"\n")
k = k+1
else:
for j in range(1, len(ucontents[0])):
ag.write(str(k)+","+str(i)+","+str(j)+","+str(len(ucontents[0])-1)+","+ucontents[0][j].replace(
"\n", "")+","+contents[0][i].replace("\n", "").replace(" ", "_").replace("-", "_")+"\n")
k = k+1
ag.close()
####################################
# save out fault fault relationship information as array
#
# parse_fault_relationships(graph_path,tmp_path,output_path)
# graph_path path to graphs
# tmp_path path to tmp directory
# output_path path to output directory
#
# Saves fault vs unit, group and fault relationship tables using outputs from map2model c++ code
####################################
def parse_fault_relationships(graph_path, tmp_path, output_path, quiet):
uf = open(os.path.join(graph_path, 'unit-fault-intersection.txt'), 'r')
contents = uf.readlines()
uf.close()
all_long_faults = np.genfromtxt(
os.path.join(output_path, 'fault_dimensions.csv'), delimiter=',', dtype='U100')
n_faults = len(all_long_faults)
# print(n_faults)
all_faults = {}
unique_list = []
# display(unique_list)
for i in range(1, n_faults):
f = all_long_faults[i][0]
# print(all_long_faults[i][0])
if f not in unique_list:
unique_list.append(f.replace("Fault_", ""))
#display('Long Faults',unique_list)
uf = open(os.path.join(output_path, 'unit-fault-relationships.csv'), 'w')
uf.write('code,'+str(unique_list).replace("[", "Fault_").replace(
",", ",Fault_").replace("'", "").replace("]", "").replace(" ", "")+'\n')
for row in contents:
row = row.replace("\n", "").split("{")
unit = row[0].split(',')
faults = row[1].replace("}", "").replace(" ", "").split(",")
ostr = str(unit[1]).strip().replace(" ", "_").replace("-", "_")
for ul in unique_list:
out = [item for item in faults if ul == item]
if(len(out) > 0):
ostr = ostr+",1"
else:
ostr = ostr+",0"
uf.write(ostr+"\n")
uf.close()
summary = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'))
summary.set_index("code", inplace=True)
# display('summary',summary)
uf_rel = pd.read_csv(os.path.join(
output_path, 'unit-fault-relationships.csv'))
groups = summary.group.unique()
ngroups = len(summary.group.unique())
# print(ngroups,'groups',groups,groups[0])
uf_array = uf_rel.to_numpy()
gf_array = np.zeros((ngroups, uf_array.shape[1]), dtype='U100')
for i in range(0, ngroups):
for j in range(0, len(uf_rel)):
if(uf_rel.iloc[j][0] in summary.index.values):
gsummary = summary.loc[uf_rel.iloc[j][0]]
if(groups[i].replace("\n", "") == gsummary['group']):
for k in range(1, len(uf_rel.iloc[j])):
if(uf_rel.iloc[j][k] == 1):
gf_array[i-1, k] = '1'
else:
continue
else:
continue
else:
continue
ug = open(os.path.join(output_path, 'group-fault-relationships.csv'), 'w')
ug.write('group')
for k in range(1, len(uf_rel.iloc[0])):
ug.write(','+uf_rel.columns[k])
ug.write("\n")
for i in range(0, ngroups):
ug.write(groups[i].replace("\n", ""))
for k in range(1, len(uf_rel.iloc[0])):
if(gf_array[i-1, k] == '1'):
ug.write(',1')
else:
ug.write(',0')
ug.write("\n")
ug.close()
uf = open(os.path.join(graph_path, 'fault-fault-intersection.txt'), 'r')
contents = uf.readlines()
uf.close()
# display(unique_list)
unique_list_ff = []
for row in contents:
row = row.replace("\n", "").split("{")
fault_1o = row[0].split(',')
fault_1o = fault_1o[1]
faults_2o = row[1].replace("(", "").replace(
")", "").replace("}", "").split(",")
if ((fault_1o.replace(" ", "") not in unique_list_ff) and (fault_1o.replace(" ", "") in unique_list)):
unique_list_ff.append(fault_1o.replace(" ", ""))
for i in range(0, len(faults_2o), 3):
if ((faults_2o[i].replace(" ", "") not in unique_list_ff) and (faults_2o[i].replace(" ", "") in unique_list)):
unique_list_ff.append(faults_2o[i].replace(" ", ""))
# display(unique_list)
G = nx.DiGraph()
ff = open(os.path.join(output_path, 'fault-fault-relationships.csv'), 'w')
ff.write('fault_id')
for i in range(0, len(unique_list_ff)):
ff.write(','+'Fault_'+unique_list_ff[i])
G.add_node('Fault_'+unique_list_ff[i])
ff.write('\n')
    for i in range(0, len(unique_list_ff)): # loop through rows
ff.write('Fault_'+unique_list_ff[i])
found = False
# for j in range(0,len(unique_list)):
        for row in contents: # loop through known intersections
row = row.replace("\n", "").split("{")
fault_1o = row[0].split(',')
fault_1o = fault_1o[1]
faults_2o = row[1].replace("(", "").replace(
")", "").replace("}", "").split(",")
# correct first order fault for this row
if(unique_list_ff[i].replace(" ", "") == fault_1o.replace(" ", "")):
found = True
for k in range(0, len(unique_list_ff)): # loop through columns
found2 = False
if(k == i): # no self intersections
ff.write(',0')
else:
# loop through second order faults for this row
for f2o in range(0, len(faults_2o), 3):
if (faults_2o[f2o].replace(" ", "") == unique_list_ff[k].replace(" ", "")):
ff.write(',1')
G.add_edge(
'Fault_'+unique_list_ff[i], 'Fault_'+faults_2o[f2o].replace(" ", ""))
found2 = True
break
if(not found2 and k != i):
# this is not a second order fault for this row
ff.write(',0')
if(found):
break
if(not found): # this fault is not a first order fault relative to another fault
for i in range(0, len(unique_list_ff)):
ff.write(',0')
ff.write('\n')
ff.close()
if not quiet:
nx.draw(G, with_labels=True, font_weight='bold')
GD = G.copy()
edges = list(G.edges)
cycles = list(nx.simple_cycles(G))
for i in range(0, len(edges)): # remove first edge from fault cycle
for j in range(0, len(cycles)):
if (edges[i][0] == cycles[j][0]
and edges[i][1] == cycles[j][1]):
if GD.has_edge(edges[i][0],edges[i][1]):
GD.remove_edge(edges[i][0],edges[i][1])
print('fault cycle removed:',edges[i][0],edges[i][1])
g=open(os.path.join(graph_path,'fault-fault-intersection.txt'),"r")
contents =g.readlines()
for line in contents:
parts=line.split(",")
f1='Fault_'+parts[1]
f1=f1.replace(" ","")
for fn in range(int((len(parts)-2)/3)):
f2='Fault_'+parts[2+(fn*3)].replace("{","").replace("(","")
f2=f2.replace(" ","")
topol=parts[3+(fn*3)].replace(" ","")
angle=parts[4+(fn*3)].replace("}","").replace(")","")
angle=angle.replace(" ","").replace("}","").replace(")","").rstrip()
if GD.has_edge(f1,f2):
GD[f1][f2]['angle']=angle
GD[f1][f2]['topol']=topol
nx.write_gml(GD, os.path.join(tmp_path, "fault_network.gml"))
try:
print('cycles', list(nx.simple_cycles(GD)))
except:
print('no cycles')
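# Illustrative usage sketch (editorial addition, not part of the original map2loop code).
# The directory names are assumptions; parse_fault_relationships() writes the three
# relationship tables reloaded below, plus tmp_path/fault_network.gml.
def _example_load_fault_relationships(output_path='output'):
    # relies on the module-level pandas/os imports already used above
    uf_rel = pd.read_csv(os.path.join(output_path, 'unit-fault-relationships.csv'))
    gf_rel = pd.read_csv(os.path.join(output_path, 'group-fault-relationships.csv'))
    ff_rel = pd.read_csv(os.path.join(output_path, 'fault-fault-relationships.csv'))
    return uf_rel, gf_rel, ff_rel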
####################################
# save out geology polygons in WKT format
#
# save_geol_wkt(sub_geol, geology_file_csv, c_l, hint_flag)
# sub_geol geopandas format geology layer
# geology_file_csv path to output WKT format file
# c_l dictionary of codes and labels specific to input geo information layers
# hint_flag if True, apply ENS age hints to unit min/max ages
#
# Saves geology layer as WKT format for use by map2model c++ code
####################################
def save_geol_wkt(sub_geol, geology_file_csv, c_l, hint_flag):
# hint_flag=False
if(hint_flag == True):
print("Using ENS age hints")
code_hint_file = '../test_data3/data/code_age_hint.csv' # input code_hint file
code_hints = pd.read_csv(code_hint_file, sep=',')
code_hints.drop_duplicates(inplace=True)
code_hints.set_index('code', inplace=True)
hint_list = []
for indx, row in code_hints.iterrows():
if(not indx in hint_list):
hint_list.append(indx)
f = open(geology_file_csv, "w+")
f.write('WKT\t'+c_l['o'].replace("\n", "")+'\t'+c_l['u'].replace("\n", "")+'\t'+c_l['g'].replace("\n", "")+'\t'+c_l['min'].replace("\n", "")+'\t'+c_l['max'].replace(
"\n", "")+'\t'+c_l['c'].replace("\n", "")+'\t'+c_l['r1'].replace("\n", "")+'\t'+c_l['r2'].replace("\n", "")+'\t'+c_l['ds'].replace("\n", "")+'\n')
# display(sub_geol)
print(len(sub_geol), " polygons")
# print(sub_geol)
for i in range(0, len(sub_geol)):
# print('**',sub_geol.loc[i][[c_l['o']]],'++')
f.write("\""+str(sub_geol.loc[i].geometry)+"\"\t")
f.write("\""+str(sub_geol.loc[i][c_l['o']])+"\"\t")
f.write("\""+str(sub_geol.loc[i][c_l['c']])+"\"\t")
# since map2model is looking for "" not "None"
f.write(
"\""+str(sub_geol.loc[i][c_l['g']]).replace("None", "")+"\"\t")
if(hint_flag == True and sub_geol.loc[i][c_l['c']] in hint_list):
hint = code_hints.loc[sub_geol.loc[i][c_l['c']]]['hint']
else:
hint = 0.0
if(str(sub_geol.loc[i][c_l['min']]) == 'None'):
min = 0.0
else:
min = float(sub_geol.loc[i][c_l['min']])+float(hint)
# print(str(sub_geol.loc[i][c_l['max']]))
if(str(sub_geol.loc[i][c_l['max']]) == 'None'):
max = 4500000000
else:
max = float(sub_geol.loc[i][c_l['max']])+float(hint)
# f.write("\""+str(sub_geol.loc[i][c_l['min']])+"\"\t")
# f.write("\""+str(sub_geol.loc[i][c_l['max']])+"\"\t")
f.write("\""+str(min)+"\"\t")
f.write("\""+str(max)+"\"\t")
f.write("\""+str(sub_geol.loc[i][c_l['u']])+"\"\t")
f.write("\""+str(sub_geol.loc[i][c_l['r1']])+"\"\t")
f.write("\""+str(sub_geol.loc[i][c_l['r2']])+"\"\t")
f.write("\""+str(sub_geol.loc[i][c_l['ds']])+"\"\n")
f.close()
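# Illustrative read-back of the tab-separated WKT export above (editorial sketch; pass
# the same path that was given to save_geol_wkt as geology_file_csv).
def _example_read_geol_wkt(geology_file_csv):
    from shapely import wkt
    df = pd.read_csv(geology_file_csv, sep='\t')
    df['geometry'] = df['WKT'].apply(wkt.loads)
    return df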
####################################
# save out orientation points in WKT format
#
# save_structure_wkt(sub_pts,structure_file_csv,c_l)
# sub_pts geopandas format orientation layer
# structure_file_csv path to output WKT format file
# c_l dictionary of codes and labels specific to input geo information layers
#
# Saves orientation layer as WKT format for use by map2model c++ code
####################################
def save_structure_wkt(sub_pts, structure_file_csv, c_l):
sub_pts.columns = ['WKT'] + list(sub_pts.columns[1:])
sub_pts.to_csv(structure_file_csv, sep='\t',
index=False)
####################################
# save out mineral deposit points in WKT format
#
# save_mindep_wkt(sub_mindep,mindep_file_csv,c_l)
# sub_mindep geopandas format mineral deposit layer
# mindep_file_csv path to output WKT format file
# c_l dictionary of codes and labels specific to input geo information layers
#
# Saves mineral deposit layer as WKT format for use by map2model c++ code
####################################
def save_mindep_wkt(sub_mindep, mindep_file_csv, c_l):
sub_mindep.columns = ['WKT'] + list(sub_mindep.columns[1:])
sub_mindep.to_csv(mindep_file_csv, sep='\t',
index=False)
# f = open(mindep_file_csv, "w+")
# f.write('WKT\t'+c_l['msc']+'\t'+c_l['msn']+'\t'+c_l['mst'] +
# '\t'+c_l['mtc']+'\t'+c_l['mscm']+'\t'+c_l['mcom']+'\n')
# print(len(sub_mindep), " points")
# for i in range(0, len(sub_mindep)):
# line = "\""+str(sub_mindep.loc[i].geometry)+"\"\t\""+str(sub_mindep.loc[i][c_l['msc']])+"\"\t\"" +\
# str(sub_mindep.loc[i][c_l['msn']])+"\"\t\""+str(sub_mindep.loc[i][c_l['mst']])+"\"\t\"" +\
# str(sub_mindep.loc[i][c_l['mtc']])+"\"\t\""+str(sub_mindep.loc[i][c_l['mscm']])+"\"\t\"" +\
# str(sub_mindep.loc[i][c_l['mcom']])+"\"\n"
# f.write(functools.reduce(operator.add, (line)))
# f.close()
####################################
# save out fault polylines in WKT format
#
# save_faults_wkt(sub_lines,fault_file_csv,c_l)
# sub_lines geopandas format fault and fold axial trace layer
# fault_file_csv path to output WKT format file
# c_l dictionary of codes and labels specific to input geo information layers
#
# Saves fault/fold axial trace layer as WKT format for use by map2model c++ code
####################################
def save_faults_wkt(sub_lines, fault_file_csv, c_l):
# Change geometry column name to WKT
sub_lines.columns = ['WKT'] + list(sub_lines.columns[1:])
# Filter cells with a feature description containing the word 'fault'
mask = sub_lines[c_l['f']].str.contains(
'fault', na=False, case=False, regex=True)
sub_faults = sub_lines[mask]
sub_faults.to_csv(fault_file_csv, sep='\t',
index=False)
def check_near_fault_contacts(path_faults, all_sorts_path, fault_dimensions_path, gp_fault_rel_path, contacts_path, c_l, proj_crs):
faults_clip = gpd.read_file(path_faults)
gp_fault_rel = pd.read_csv(gp_fault_rel_path)
gp_fault_rel.set_index('group', inplace=True)
contacts = pd.read_csv(contacts_path)
all_sorts = pd.read_csv(all_sorts_path)
all_sorts.set_index('code', inplace=True)
fault_dimensions = pd.read_csv(fault_dimensions_path)
fault_dimensions2 = fault_dimensions.set_index('Fault')
groups = all_sorts['group'].unique()
for indx, flt in faults_clip.iterrows():
#print('Fault_'+str(flt[c_l['o']]) )
if('Fault_'+str(flt[c_l['o']]) in fault_dimensions['Fault'].values):
#print('Fault_'+str(flt[c_l['o']]),flt.geometry.type,flt.geometry.centroid )
if(flt.geometry.type == 'LineString'):
flt_ls = LineString(flt.geometry)
midx = flt_ls.coords[0][0]+((flt_ls.coords[0]
[0]-flt_ls.coords[len(flt_ls.coords)-1][0])/2)
midy = flt_ls.coords[0][1]+((flt_ls.coords[0]
[1]-flt_ls.coords[len(flt_ls.coords)-1][1])/2)
l, m = m2l_utils.pts2dircos(flt_ls.coords[0][0], flt_ls.coords[0][1],
flt_ls.coords[len(flt_ls.coords)-1][0], flt_ls.coords[len(flt_ls.coords)-1][1])
angle = (360+degrees(atan2(l, m))) % 360
xax = fault_dimensions2.loc['Fault_' +
str(flt[c_l['o']])]['HorizontalRadius']*.99*.81
yax = fault_dimensions2.loc['Fault_' +
str(flt[c_l['o']])]['InfluenceDistance']*.99*.81
circle = Point(flt.geometry.centroid.x, flt.geometry.centroid.y).buffer(
1) # type(circle)=polygon
ellipse = shapely.affinity.scale(
circle, xax, yax) # type(ellipse)=polygon
ellipse = shapely.affinity.rotate(
ellipse, 90-angle, origin='center', use_radians=False)
splits = split(ellipse, flt.geometry)
# display(flt.geometry)
i = 0
for gp in groups:
if(not gp == 'cover'):
all_sorts2 = all_sorts[all_sorts["group"] == gp]
first = True
for half in splits:
half_poly = Polygon(half)
half_ellipse = gpd.GeoDataFrame(
index=[0], crs=proj_crs, geometry=[half_poly])
has_contacts = True
for indx, as2 in all_sorts2.iterrows():
contacts2 = contacts[contacts["formation"] == indx]
if(first):
first = False
all_contacts = contacts2.copy()
else:
all_contacts = pd.concat(
[all_contacts, contacts2], sort=False)
contacts_gdf = gpd.GeoDataFrame(all_contacts, geometry=[Point(
x, y) for x, y in zip(all_contacts.X, all_contacts.Y)])
found = gpd.sjoin(
contacts_gdf, half_ellipse, how='inner', op='within')
if(len(found) > 0 and has_contacts):
has_contacts = True
else:
has_contacts = False
i = i+1
# print('Fault_'+str(flt[c_l['o']]),gp,has_contacts)
if(not has_contacts):
if(gp_fault_rel.loc[gp, 'Fault_'+str(flt[c_l['o']])] == 1):
print(
gp, 'Fault_'+str(flt[c_l['o']]), 'combination switched OFF')
gp_fault_rel.loc[gp, 'Fault_' +
str(flt[c_l['o']])] = 0
elif(flt.geometry.type == 'MultiLineString' or flt.geometry.type == 'GeometryCollection'):
raise NameError('map2loop error: Fault_'+str(
                    flt[c_l['o']])+' cannot be analysed as it is a multilinestring,\n it may be a fault that is clipped into two parts by the bounding box\nfix in original shapefile??')
continue
for pline in flt.geometry:
flt_ls = LineString(pline)
midx = flt_ls.coords[0][0]+(
(flt_ls.coords[0][0]-flt_ls.coords[len(flt_ls.coords)-1][0])/2)
midy = flt_ls.coords[0][1]+(
(flt_ls.coords[0][1]-flt_ls.coords[len(flt_ls.coords)-1][1])/2)
l, m = m2l_utils.pts2dircos(flt_ls.coords[0][0], flt_ls.coords[0][1],
flt_ls.coords[len(flt_ls.coords)-1][0], flt_ls.coords[len(flt_ls.coords)-1][1])
angle = (360+degrees(atan2(l, m))) % 360
xax = fault_dimensions2.loc['Fault_' +
str(flt[c_l['o']])]['HorizontalRadius']*.99*.81
yax = fault_dimensions2.loc['Fault_' +
str(flt[c_l['o']])]['InfluenceDistance']*.99*.81
circle = Point(flt.geometry.centroid.x, flt.geometry.centroid.y).buffer(
1) # type(circle)=polygon
ellipse = shapely.affinity.scale(
circle, xax, yax) # type(ellipse)=polygon
ellipse = shapely.affinity.rotate(
ellipse, 90-angle, origin='center', use_radians=False)
splits = split(ellipse, flt.geometry)
# display(splits)
i = 0
for gp in groups:
if(not gp == 'cover'):
all_sorts2 = all_sorts[all_sorts["group"] == gp]
first = True
for half in splits:
half_poly = Polygon(half)
half_ellipse = gpd.GeoDataFrame(
index=[0], crs=proj_crs, geometry=[half_poly])
has_contacts = True
for indx, as2 in all_sorts2.iterrows():
contacts2 = contacts[contacts["formation"] == indx]
if(first):
first = False
all_contacts = contacts2.copy()
else:
all_contacts = pd.concat(
[all_contacts, contacts2], sort=False)
contacts_gdf = gpd.GeoDataFrame(all_contacts, geometry=[Point(
x, y) for x, y in zip(all_contacts.X, all_contacts.Y)])
found = gpd.sjoin(
contacts_gdf, half_ellipse, how='inner', op='within')
if(len(found) > 0 and has_contacts):
has_contacts = True
else:
has_contacts = False
i = i+1
# print('Fault_'+str(flt[c_l['o']]),gp,has_contacts)
if(not has_contacts):
if(gp_fault_rel.loc[gp, 'Fault_'+str(flt[c_l['o']])] == 1):
print(
gp, 'Fault_'+str(flt[c_l['o']]), 'combination switched OFF')
gp_fault_rel.loc[gp, 'Fault_' +
str(flt[c_l['o']])] = 0
gp_fault_rel.to_csv(gp_fault_rel_path)
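# Editorial sketch (not part of the original module) of the influence-ellipse construction
# used in check_near_fault_contacts() above: a unit circle buffered around a centre point,
# scaled to the two fault radii, then rotated to the fault trend.
def _example_influence_ellipse(cx, cy, xax, yax, angle):
    circle = Point(cx, cy).buffer(1)  # unit circle as a polygon
    ellipse = shapely.affinity.scale(circle, xax, yax)
    return shapely.affinity.rotate(ellipse, 90-angle, origin='center', use_radians=False)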
def super_groups_and_groups(group_girdle, tmp_path, misorientation, c_l,cover_map):
group_girdle = pd.DataFrame.from_dict(group_girdle, orient='index')
group_girdle.columns = ['plunge', 'bearing', 'num orientations']
group_girdle.sort_values(
by='num orientations', ascending=False, inplace=True)
display(group_girdle)
l, m, n = m2l_utils.ddd2dircos(
group_girdle.iloc[0]['plunge'], group_girdle.iloc[0]['bearing'])
super_group = pd.DataFrame([[group_girdle[0:1].index[0], 'Super_Group_0', l, m, n]], columns=[
'Group', 'Super_Group', 'l', 'm', 'n'])
super_group.set_index('Group', inplace=True)
geol = gpd.read_file(os.path.join(tmp_path, 'geol_clip.shp'))
geol = geol.drop_duplicates(subset=c_l['c'], keep="first")
geol=geol.set_index(c_l['g'])
sg_index = 0
for i in range(1, len(group_girdle)):
#if(c_l['intrusive'] in geol.loc[group_girdle.iloc[i].name.replace("_"," ")][c_l['r1']]
#and c_l['sill'] not in geol.loc[group_girdle.iloc[i].name.replace("_"," ")][c_l['ds']]):
if(c_l['intrusive'] in geol.loc[group_girdle.iloc[i].name][c_l['r1']]
and c_l['sill'] not in geol.loc[group_girdle.iloc[i].name][c_l['ds']]):
sg_index = sg_index+1
#print('not found',sg_index)
sgname = 'Super_Group_'+str(sg_index)
super_group_new = pd.DataFrame(
[[group_girdle[i:i+1].index[0], sgname, l, m, n]], columns=['Group', 'Super_Group', 'l', 'm', 'n'])
super_group_new.set_index('Group', inplace=True)
super_group = super_group.append(super_group_new)
elif(group_girdle.iloc[i]['num orientations'] > 5):
l, m, n = m2l_utils.ddd2dircos(
group_girdle.iloc[i]['plunge'], group_girdle.iloc[i]['bearing'])
found = False
sg_i = 0
for ind, sg in super_group.iterrows():
c = sg['l']*l + sg['m']*m + sg['n']*n
if c > 1:
c = 1
c = degrees(acos(c))
if(c < misorientation and not found):
found = True
sgname = 'Super_Group_'+str(sg_i)
super_group_old = pd.DataFrame(
[[group_girdle[i:i+1].index[0], sgname, l, m, n]], columns=['Group', 'Super_Group', 'l', 'm', 'n'])
super_group_old.set_index('Group', inplace=True)
super_group = super_group.append(super_group_old)
sg_i = sg_i+1
if(not found):
sg_index = sg_index+1
#print('not found',sg_index)
sgname = 'Super_Group_'+str(sg_index)
super_group_new = pd.DataFrame(
[[group_girdle[i:i+1].index[0], sgname, l, m, n]], columns=['Group', 'Super_Group', 'l', 'm', 'n'])
super_group_new.set_index('Group', inplace=True)
super_group = super_group.append(super_group_new)
else: # not enough orientations to test, so lumped with group with most orientations
sgname = 'Super_Group_'+str(0)
super_group_old = pd.DataFrame(
[[group_girdle[i:i+1].index[0], sgname, l, m, n]], columns=['Group', 'Super_Group', 'l', 'm', 'n'])
super_group_old.set_index('Group', inplace=True)
super_group = super_group.append(super_group_old)
use_gcode3 = []
for ind, sg in super_group.iterrows():
clean = ind.replace(" ", "_").replace("-", "_")
use_gcode3.append(clean)
if(cover_map):
use_gcode3.append('cover')
sg2 = set(super_group['Super_Group'])
super_groups = []
if(cover_map):
super_groups.append(['cover'])
for s in sg2:
temp = []
for ind, sg in super_group.iterrows():
if(s == sg['Super_Group']):
temp.append(ind)
super_groups.append(temp)
f = open(os.path.join(tmp_path, 'super_groups.csv'), 'w')
for sg in super_groups:
for s in sg:
f.write(str(s)+',')
f.write('\n')
f.close()
return(super_groups, use_gcode3)
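# Editorial sketch of the pole-misorientation test used in super_groups_and_groups():
# the angle between two direction-cosine vectors, with the dot product clamped so
# acos() stays defined under floating-point noise.
def _example_misorientation_angle(l1, m1, n1, l2, m2, n2):
    c = l1*l2 + m1*m2 + n1*n2
    c = max(min(c, 1.0), -1.0)
    return degrees(acos(c))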
def use_asud(strat_graph_file, graph_path):
asud_strat_file = "https://gist.githubusercontent.com/yohanderose/3b257dc768fafe5aaf70e64ae55e4c42/raw/8598c7563c1eea5c0cd1080f2c418dc975cc5433/ASUD.csv"
G = nx.read_gml(strat_graph_file, label='id')
Gp = G.copy().to_directed()
try:
ASUD = pd.read_csv(asud_strat_file, ",")
except Exception as e:
print(e)
return
for e in G.edges:
glabel_0 = G.nodes[e[0]]['LabelGraphics']['text']
glabel_1 = G.nodes[e[1]]['LabelGraphics']['text']
edge = ASUD.loc[(ASUD['over'] == glabel_0) &
(ASUD['under'] == glabel_1)]
if(len(edge) > 0 and (not('isGroup' in Gp.nodes[e[0]]) and not('isGroup' in Gp.nodes[e[1]]))):
if(Gp.has_edge(e[1], e[0])):
Gp.remove_edge(e[1], e[0])
if(Gp.has_edge(e[0], e[1])):
Gp.remove_edge(e[0], e[1])
Gp.add_edge(e[0], e[1])
edge = ASUD.loc[(ASUD['under'] == glabel_0) &
(ASUD['over'] == glabel_1)]
if(len(edge) > 0 and (not('isGroup' in Gp.nodes[e[0]]) and not('isGroup' in Gp.nodes[e[1]]))):
if(Gp.has_edge(e[0], e[1])):
Gp.remove_edge(e[0], e[1])
if(Gp.has_edge(e[1], e[0])):
Gp.remove_edge(e[1], e[0])
Gp.add_edge(e[1], e[0])
recode = {}
i = 0
for n in Gp.nodes:
if('isGroup' in Gp.nodes[n]):
recode[n] = i
i = i+1
for n in Gp.nodes:
if(not 'isGroup' in Gp.nodes[n]):
Gp.nodes[n]['gid'] = recode[Gp.nodes[n]['gid']]
# check for cycle and arbitrarily remove first of the edges
#nx.write_gml(Gp,os.path.join(graph_path, 'ASUD_strat_test.gml'))
try:
cycles = list(nx.simple_cycles(G))
for c in cycles:
G.remove_edge(c[0], c[1])
warning_msg = 'map2loop warning: The stratigraphic relationship: "' + \
c[0]+' overlies '+c[1] + \
'" was removed as it conflicts with another relationship'
warnings.warn(warning_msg)
except:
print('no cycles')
nx.write_gml(Gp, os.path.join(graph_path, 'ASUD_strat.gml'))
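# Editorial sketch (not part of the original module) of the cycle-breaking idea used in
# use_asud() above: drop the first edge of each simple cycle so the graph can be sorted.
def _example_break_cycles(G):
    H = G.copy()
    for cycle in nx.simple_cycles(G):
        if len(cycle) > 1 and H.has_edge(cycle[0], cycle[1]):
            H.remove_edge(cycle[0], cycle[1])
    return H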
####################################
# combine multiple outputs into a single graph that contains all info needed by LoopStructural
#
# make_Loop_graph(tmp_path, output_path, ...)
# tmp_path path to tmp directory
# output_path path to output directory
# (remaining arguments supply fault clustering options, point data, the dtm, CRS, the code/label dictionary, run flags, config and bounding box)
#
# Returns networkx graph
####################################
def make_Loop_graph(tmp_path,output_path,fault_orientation_clusters,fault_length_clusters,point_data,dtm_file,dst_crs,c_l,run_flags,config,bbox):
Gloop=nx.DiGraph()
geol=gpd.read_file(os.path.join(tmp_path, 'geol_clip.shp'))
strats=geol.drop_duplicates(subset=[c_l['c']])
strats[c_l['c']]=strats[c_l['c']].replace(' ','_').replace('-','_')
strats.set_index([c_l['c']],inplace=True)
# Load faults and stratigraphy
Gf = nx.read_gml(os.path.join(tmp_path, 'fault_network.gml'))
Astrat = pd.read_csv(os.path.join(tmp_path, 'all_sorts_clean.csv'), ",")
# add formation stratigraphy to graph as nodes
Astrat=Astrat.set_index('code')
for ind,s in Astrat.iterrows():
Gloop.add_node(s.name,s_colour=s['colour'],ntype="formation",
group=s['group'],
StratType=s['strat_type'],
uctype=s['uctype'],
GroupNumber=s['group number'],
IndexInGroup=s['index in group'],
NumberInGroup=s['number in group'],
MinAge=strats.loc[s.name][c_l['min']],
MaxAge=strats.loc[s.name][c_l['max']]
)
# add formation-formation stratigraphy to graph as edges
i=0
for ind,s in Astrat.iterrows():
if ind != Astrat.index[-1]:
Gloop.add_edge(Astrat.iloc[i].name,Astrat.iloc[i+1].name)
Gloop[Astrat.iloc[i].name][Astrat.iloc[i+1].name]['etype']='formation_formation'
i=i+1
#add faults to graph as nodes
Af_d = pd.read_csv(os.path.join(output_path, 'fault_dimensions.csv'), ",")
for ind,f in Af_d.iterrows():
Gloop.add_node(f['Fault'],ntype="fault")
#add fault centroid to node
Afgeom = pd.read_csv(os.path.join(output_path, 'faults.csv'), ",")
pd.to_numeric(Afgeom["X"], downcast="float")
pd.to_numeric(Afgeom["Y"], downcast="float")
pd.to_numeric(Afgeom["Z"], downcast="float")
for n in Gloop.nodes:
if( "Fault" in n):
subset=Afgeom[Afgeom['formation']==n]
Gloop.nodes[n]['Xmean']=subset['X'].mean()
Gloop.nodes[n]['Ymean']=subset['Y'].mean()
Gloop.nodes[n]['Zmean']=subset['Z'].mean()
for e in Gf.edges:
Gloop.add_edge(e[0],e[1])
Gloop[e[0]][e[1]]['Angle']=Gf.edges[e]['angle']
Gloop[e[0]][e[1]]['Topol']=Gf.edges[e]['topol']
Gloop[e[0]][e[1]]['etype']='fault_fault'
#add fault dimension info to fault nodes
Af_d = pd.read_csv(os.path.join(output_path, 'fault_dimensions.csv'), ",")
fault_l=pd.DataFrame(Af_d['Fault'])
Af_d=Af_d.set_index('Fault')
fault_l=fault_l.set_index('Fault')
fault_l['incLength']=Af_d['incLength']
for n in Gloop.nodes:
if("Fault" in n):
if( Af_d.loc[n].name in Gloop.nodes):
Gloop.nodes[n]['HorizontalRadius']=Af_d.loc[n]['HorizontalRadius']
Gloop.nodes[n]['VerticalRadius']=Af_d.loc[n]['VerticalRadius']
Gloop.nodes[n]['InfluenceDistance']=Af_d.loc[n]['InfluenceDistance']
Gloop.nodes[n]['IncLength']=Af_d.loc[n]['incLength']
Gloop.nodes[n]['f_colour']=Af_d.loc[n]['colour']
#add fault orientation info and clustering on orientation and length to fault nodes
Af_o = pd.read_csv(os.path.join(output_path, 'fault_orientations.csv'), ",")
Af_o=Af_o.drop_duplicates(subset="formation")
Af_o=Af_o.set_index('formation')
| pd.to_numeric(Af_o["dip"], downcast="float") | pandas.to_numeric |
import hdt
import gzip, sys, csv
import pandas as pd
import numpy as np
import kgbench as kg
from tqdm import tqdm
"""
Extracts target labels.
"""
def entity(ent):
"""
    Returns the value of an entity separated from its datatype (both returned as strings).
:param ent:
:return:
"""
if ent.startswith('_'):
return ent, 'blank_node'
if ent.startswith('"'):
if '^^' in ent:
datatype, ent = ent[::-1].split('^^', maxsplit=1) # split once from the end
datatype, ent = datatype[::-1], ent[::-1]
datatype, ent = datatype[1:-1], ent[1:-1]
return ent, datatype
else:
return ent[1:-1], 'none'
else:
assert ent.startswith('http') or ent.startswith('file') or ent.startswith('urn') or ent.startswith('mailto')
# -- NB this assert only holds for this specific dataset.
return ent, 'uri'
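# Quick illustrative checks of entity() (editorial addition; the inputs below are made-up
# examples, assuming typed literals arrive in the form "value"^^<datatype>):
# entity('"42"^^<http://www.w3.org/2001/XMLSchema#integer>') -> ('42', 'http://www.w3.org/2001/XMLSchema#integer')
# entity('"hello"') -> ('hello', 'none')
# entity('_:b0') -> ('_:b0', 'blank_node')
# entity('http://example.org/thing') -> ('http://example.org/thing', 'uri')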
## Map from dataset category to coarse-grained classes.
map = {}
map['http://purl.org/collections/nl/am/t-14592'] = 'Books and Documents'
# boekencollectie 3479
# Book collection
map['http://purl.org/collections/nl/am/t-15459'] = 'Decorative art'
# meubelcollectie 3206
# Furniture
map['http://purl.org/collections/nl/am/t-15573'] = 'Decorative art'
# glascollectie 1028
# Glass
map['http://purl.org/collections/nl/am/t-15579'] = 'Decorative art'
# textielcollectie 7366
# Textiles
map['http://purl.org/collections/nl/am/t-15606'] = 'Decorative art'
# keramiekcollectie 5152
# Ceramics
map['http://purl.org/collections/nl/am/t-16469'] = 'Metallic art'
# onedele metalen collectie 797
# Non-noble metals
map['http://purl.org/collections/nl/am/t-22503'] = 'Prints'
# prentencollectie 22048
# Prints
map['http://purl.org/collections/nl/am/t-22504'] = 'Photographs'
# fotocollectie 1563
# Photographs
map['http://purl.org/collections/nl/am/t-22505'] = 'Drawings'
# tekeningencollectie 5455
# Drawings
map['http://purl.org/collections/nl/am/t-22506'] = 'Paintings'
# schilderijencollectie 2672
# Paintings
map['http://purl.org/collections/nl/am/t-22507'] = 'Decorative art'
# beeldencollectie 943
# Sculpture
map['http://purl.org/collections/nl/am/t-22508'] = 'Metallic art'
# edele metalencollectie 3533
# Noble metals
map['http://purl.org/collections/nl/am/t-22509'] = 'Historical artifacts'
# penningen- en muntencollectie 6440
# Coins etc.
map['http://purl.org/collections/nl/am/t-28650'] = 'Historical artifacts'
# archeologiecollectie 582
# Archeaological artifacts
map['http://purl.org/collections/nl/am/t-23765'] = 'Books and Documents'
# documentencollectie 533
# Document collection
map['http://purl.org/collections/nl/am/t-31940'] = 'Metallic art'
# -- Onedele collectie 3
# A small category containing only room numbers from a defunct men's club
map['http://purl.org/collections/nl/am/t-32052'] = 'Historical artifacts'
# -- maten en gewichtencollectie 536
# Measures and weights
map['http://purl.org/collections/nl/am/t-5504'] = 'Decorative art'
# -- kunstnijverheidcollectie 8087
# Arts and crafts
complete = hdt.HDTDocument('../../amfull/raw/am-combined.hdt')
# -- We use the AM combined data, because the target relations have already been stripped from amplus-all.xxx
# The class relation
rel = 'http://purl.org/collections/nl/am/objectCategory'
data = []
triples, c = complete.search_triples('', rel, '')
for i, (s, _, o) in enumerate(triples):
data.append([s, o])
df = pd.DataFrame(data, columns=['instance', 'label_original'])
df['cls_label'] = df.label_original.map(map)
df.cls_label = pd.Categorical(df.cls_label)
df['cls'] = df.cls_label.cat.codes
df.to_csv('all.csv', sep=',', index=False, header=True)
print('Created dataframe. Class frequencies:')
print(df.cls_label.value_counts(normalize=False))
print(df.cls_label.value_counts(normalize=True))
# * Split train, validation and test sets
# fixed seed for deterministic output
np.random.seed(0)
meta_size = 20_000
test_size = 20_000
val_size = 20_000
train_size = len(df) - test_size - val_size - meta_size
assert train_size > 0
print(f'train {train_size}, val {val_size}, test {test_size}, meta {meta_size}')
bin = np.concatenate( [
np.full((train_size,), 0),
np.full((val_size,), 1),
np.full((test_size,), 2),
np.full((meta_size,), 3) ], axis=0)
np.random.shuffle(bin) # in place
train = df[bin == 0]
train.to_csv('training.csv', sep=',', index=False, header=True)
val = df[bin == 1]
val.to_csv('validation.csv', sep=',', index=False, header=True)
test = df[bin == 2]
test.to_csv('testing.csv', sep=',', index=False, header=True)
test = df[bin == 3]
test.to_csv('meta-testing.csv', sep=',', index=False, header=True)
print('created train, val, test, meta split.')
print('Creating dictionaries.')
stripped = hdt.HDTDocument('amplus-stripped.hdt')
entities = set()
relations = set()
datatypes = set()
triples, c = stripped.search_triples('', '', '')
for s, p, o in tqdm(triples, total=c):
datatypes.add(entity(s)[1])
datatypes.add(entity(o)[1])
i2d = list(datatypes)
i2d.sort()
d2i = {d:i for i, d in enumerate(i2d)}
triples, c = stripped.search_triples('', '', '')
for s, p, o in tqdm(triples, total=c):
se, sd = entity(s)
oe, od = entity(o)
entities.add((se, sd))
entities.add((oe, od))
relations.add(p)
i2e = list(entities)
i2r = list(relations)
i2e.sort(); i2r.sort()
df = pd.DataFrame(enumerate(i2d), columns=['index', 'datatype'])
df.to_csv('datatypes.int.csv', index=False, header=True)
ent_data = [(i, dt, ent) for i, (ent, dt) in enumerate(i2e)]
df = | pd.DataFrame(ent_data, columns=['index', 'datatype', 'label']) | pandas.DataFrame |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
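# Editorial sketch (not part of the pandas test suite): the minimal public-API round trip
# these tests exercise, shown against an in-memory SQLite connection.
def _example_public_api_roundtrip():
    conn = sqlite3.connect(":memory:")
    df = DataFrame({"a": [1, 2, 3]})
    sql.to_sql(df, "example_table", conn, index=False)
    result = sql.read_sql_query("SELECT * FROM example_table", conn)
    conn.close()
    return result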
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta( | Series(["00:00:01", "00:00:03"], name="foo") | pandas.Series |
import pandas as pd
import numpy as np
import glob
import sys
import re
from scipy import interpolate
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import z_at_value
import astropy.units as u
from cosmic.evolve import Evolve
from cosmic.sample.initialbinarytable import InitialBinaryTable
#----------------------------------------------------------------------------------
## phsyical constants
c = 2.99e10 ## speed of light in cm/s
secyr = 3.154e7 ## seconds per year
Myr = 1e6 ## years per Myr
Msun = 1.989e33 ## grams per solar mass
Lsun = 3.839e33 ## erg/sec per solar luminosity
#-----------------------------------------------------------------------------------
## analytic approximation for P(omega) from Dominik et al. 2015
def P_omega(omega_values):
return 0.374222*(1-omega_values)**2 + 2.04216*(1-omega_values)**4 - 2.63948*(1-omega_values)**8 + 1.222543*(1-omega_values)**10
#----------------------------------------------------------------------------------
### Monte Carlo sampling for detections above the given SNR threshold
def calc_detection_prob(m1, m2, z_merge):
## constants that reflect LIGO design sensitivity
d_L8 = 1 ## in Gpc
    M_8 = 10 ## in Msun
SNR_thresh = 8
## approximate typical SNR from Fishbach et al. 2018
M_chirp = (m1*m2)**(3./5)/(m1+m2)**(1./5)
d_C = cosmo.comoving_distance(z_merge).to(u.Gpc).value
d_L = (1+z_merge)*d_C
rho_0 = 8*(M_chirp*(1+z_merge)/M_8)**(5./6)*d_L8/d_L ## this is the "typical/optimal" SNR
if (rho_0 < SNR_thresh): return 0
## sample omega according to distribution for omega via inverse CDF method
dist_size = 10000
sample_size = 1000
P_omega_dist = P_omega(np.linspace(0, 1, dist_size))
inv_P_omega = interpolate.interp1d(P_omega_dist, np.linspace(0, 1, dist_size), fill_value="extrapolate")
omega = inv_P_omega(np.random.uniform(0, 1, sample_size))
## find the true SNRs given sky location
rho = omega*rho_0
accept_SNR_num = len(rho[np.where(rho >= SNR_thresh)])
p_det = accept_SNR_num/sample_size
return p_det
#-----------------------------------------------------------------------------------#
def calc_flux(current_BH_mass, initial_BH_mass, mdot_BH, d_L):
bolometric_correction = 0.8
where_lower_masses = current_BH_mass < np.sqrt(6)*initial_BH_mass
eta_lower_masses = 1 - np.sqrt(1-(current_BH_mass/(3*initial_BH_mass))**2)
eta = np.where(where_lower_masses, eta_lower_masses, 0.42)
acc_rate = mdot_BH/(1-eta) ## accretion rate in Msun/year
luminosity = bolometric_correction*eta*acc_rate*c**2*Msun/secyr ## accretion luminosity in erg/sec
flux = luminosity/(4 * np.pi * d_L**2) ## flux in erg/s/cm^2
return flux
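#-----------------------------------------------------------------------------------
## illustrative call of calc_flux (editorial sketch; the BH masses [Msun], accretion rate
## [Msun/yr] and luminosity distance [cm, roughly 1 Mpc] below are made-up values)
def _example_flux():
    return calc_flux(np.array([10.0]), np.array([8.0]), np.array([1e-8]), 3.086e24)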
#----------------------------------------------------------------------------------
columns=['bin_num', 'metallicity', 'merger_type', 'bin_state', 'delay_time', 'lookback_time', 'z_f', 'p_det', 'p_cosmic', 'merge_by_z0', 'ZAMS_mass_k1','ZAMS_mass_k2', 'remnant_mass_k1', 'remnant_mass_k2', 'final_k1', 'final_k2', 'BH_mass_i', 'donor_mass_i', 'donor_type', 'XRB_sep_i', 'XRB_porb_i', 'emit11', 'emit13', 'emit15', 'emit_tot', 'this_BBH', 'this_BBHm', 'this_HMXB']
df_all = | pd.DataFrame(columns=columns) | pandas.DataFrame |
# noinspection PyPackageRequirements
import datawrangler as dw
import numpy as np
import pandas as pd
from .common import Manipulator
# noinspection PyShadowingBuiltins
def fitter(data, axis=0):
if axis == 1:
return dw.core.update_dict(fitter(data.T, axis=0), {'transpose': True})
elif axis != 0:
raise ValueError('axis must be either 0 or 1')
mean = pd.Series(index=data.columns)
std = | pd.Series(index=data.columns) | pandas.Series |
import pandas as pd
import dropbox
from tqdm import tqdm
from dropbox import DropboxOAuth2FlowNoRedirect
'''
This sets up a dropbox OAuthed client
'''
APP_KEY = 'xxx'
APP_SECRET = 'xxx'
auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)
authorize_url = auth_flow.start()
print("1. Go to: " + authorize_url)
print("2. Click \"Allow\" (you might have to log in first).")
print("3. Copy the authorization code.")
auth_code = input("Enter the authorization code here: ").strip()
try:
oauth_result = auth_flow.finish(auth_code)
except Exception as e:
print('Error: %s' % (e,))
exit(1)
with dropbox.Dropbox(oauth2_access_token=oauth_result.access_token) as dbx:
dbx.users_get_current_account()
print("Successfully set up client!")
'''
This sets up the variables needed
'''
mode = input('\nWould you like to:\na: Create or replace content\nb: Delete content\n').strip()
print('\nFor folder\nThe input should be /folder1/folder2/\n')
print('\nFor files\nThe input should be /folder1/folder2/file1.ex\n')
child = input("Enter the child folders' location here: ").strip()
mContent = input("Enter the master content's location here: ").strip()
mName = mContent.split('/')[-2]
Spreadsheet = input("Enter the Spreadsheet's location here: ").strip()
#Reads speadsheet
metadata, res = dbx.files_download(path= Spreadsheet)
sht = pd.read_excel(res.content)
#Create relocation list
entries = | pd.DataFrame([]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import random
# Date and Time
# =============
print(datetime.datetime(2000, 1, 1))
print(datetime.datetime.strptime("2000/1/1", "%Y/%m/%d"))
print(datetime.datetime(2000, 1, 1, 0, 0).strftime("%Y%m%d"))
# to_datetime
# ===========
print(pd.to_datetime("4th of July"))
print(pd.to_datetime("13.01.2000"))
print(pd.to_datetime("7/8/2000"))
print(pd.to_datetime("7/8/2000", dayfirst=True))
print(issubclass(pd.Timestamp, datetime.datetime))
ts = pd.to_datetime(946684800000000000)
print(ts.year, ts.month, ts.day, ts.weekday())
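# editorial aside (illustrative, uses the pytz import above): a Timestamp can carry a time zone
print(pd.Timestamp("2000-01-01 12:00").tz_localize(pytz.timezone("US/Eastern")))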
index = [pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03")]
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts)
print(ts.index)
ts = pd.Series(np.random.randn(len(index)),
index=["2000-01-01", "2000-01-02", "2000-01-03"])
print(ts.index)
index = pd.to_datetime(["2000-01-01", "2000-01-02", "2000-01-03"])
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts.index)
print(pd.date_range(start="2000-01-01", periods=3, freq='H'))
print(pd.date_range(start="2000-01-01", periods=3, freq='T'))
print(pd.date_range(start="2000-01-01", periods=3, freq='S'))
print(pd.date_range(start="2000-01-01", periods=3, freq='B'))
print( | pd.date_range(start="2000-01-01", periods=5, freq='1D1h1min10s') | pandas.date_range |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import math
import json
import datetime
import matplotlib.dates as mdates
import os.path
import pickle
sns.set(context='paper', style={'axes.axisbelow': True,
'axes.edgecolor': '.8',
'axes.facecolor': 'white',
'axes.grid': True,
'axes.labelcolor': '.15',
'axes.linewidth': 0.5,
'figure.facecolor': 'white',
'font.family': [u'sans-serif'],
'font.sans-serif': [u'Abel'],
'font.weight' : u'light',
'grid.color': '.8',
'grid.linestyle': u'-',
'image.cmap': u'Greys',
'legend.frameon': True,
'legend.numpoints': 1,
'legend.scatterpoints': 1,
'lines.solid_capstyle': u'butt',
'text.color': '.15',
'xtick.color': '.15',
'xtick.direction': u'out',
'xtick.major.size': 0.0,
'xtick.minor.size': 0.0,
'ytick.color': '.15',
'ytick.direction': u'out',
'ytick.major.size': 0.0,
'ytick.minor.size': 0.0}, font_scale = 2)
flatui = ['#28aad5', '#b24d94', '#38ae97' ,'#ec7545']
def minutes_second_formatter(value, tick_number):
m, s = divmod(value / 1000.0, 60)
return '%02d:%02d' % (m, s)
def second_formatter(value, tick_number):
v = int(value / 1000.0)
if value == 0 or v > 0:
return v
else:
return '{:.1f}'.format(value / 1000.0)
def loadPrecisionRecallData(fn):
d = {'Batch': [], 'Run': [], 'Precision' : [], 'Recall' : [] }
data = json.load(open(fn))
run_cnt = 0
for run in data['Runs']:
tp = run['TruePositivesPerBatch']
fp = run['FalsePositivesPerBatch']
fn = run['FalseNegativesPerBatch']
for i in range(len(tp)):
d['Precision'].append(float(tp[i]) / (float(tp[i]) + float(fp[i])))
d['Recall'].append(float(tp[i]) / (float(tp[i]) + float(fn[i])))
d['Batch'].append(i)
d['Run'].append(run_cnt)
run_cnt += 1
return d
def loadMemoryData(fn):
d = {'Time': [], 'Memory': [], 'Run': []}
data = json.load(open(fn))
run_cnt = 0
step = 2000
for run in data['Runs']:
bt = run['MemoryUsagePerTimeInMillis']
interval = 0
interval_values = []
times = [float(t) for t in bt]
min_t = 0
max_t = np.max(times)
d['Run'].append(run_cnt)
d['Time'].append(0)
d['Memory'].append(0)
for bin in np.arange(0, max_t, step):
values = []
for t in bt:
tf = float(t)
gb = bt[t] / 1073741824.0
if tf < bin and tf >= bin - step:
values.append(gb)
if len(values) > 0:
d['Run'].append(run_cnt)
d['Time'].append(int((bin)))
d['Memory'].append(np.mean(values))
run_cnt += 1
return d
def loadTruePositives(fn):
data = json.load(open(fn))
return data['Runs'][0]['TruePositivesPerBatch']
def loadRawData(fn, dataLabel, yLabel):
d = {'Batch': [], 'Run': [], yLabel : []}
data = json.load(open(fn))
run_cnt = 0
for run in data['Runs']:
data = run[dataLabel]
for i in range(len(data)):
d[yLabel].append(np.mean(data[i]))
d['Batch'].append(i)
d['Run'].append(run_cnt)
run_cnt += 1
return d
def loadErrorData(fn):
d = {'Batch': [], 'Run': [], 'MeanError' : []}
data = json.load(open(fn))
run_cnt = 0
for run in data['Runs']:
data = run['Errors']
for i in range(len(data)):
d['MeanError'].append(data[i] * 100.0)
d['Batch'].append(i)
d['Run'].append(run_cnt)
run_cnt += 1
return d
def loadAbsoluteErrorData(fn):
d = {'Batch': [], 'Run': [], 'MeanAbsoluteError' : [], 'MaxAbsoluteError' : [], 'TheoreticalAbsoluteError' : []}
data = json.load(open(fn))
run_cnt = 0
for run in data['Runs']:
data = run['AbsoluteErrorsPerBatch']
for i in range(len(data)):
dat = [x for x in data[i]]
d['MeanAbsoluteError'].append(np.mean(dat))
d['MaxAbsoluteError'].append(np.max(dat))
d['TheoreticalAbsoluteError'].append(run['Errors'][i] / 2.0)
d['Batch'].append(i)
d['Run'].append(run_cnt)
run_cnt += 1
return d
def loadNormalizedErrorData(fn):
d = {'Batch': [], 'Run': [], 'MeanNormalizedError' : [], 'MaxNormalizedError' : []}
data = json.load(open(fn))
run_cnt = 0
for run in data['Runs']:
data = run['NormalizedErrorsPerBatch']
for i in range(len(data)):
dat = [x * 100.0 for x in data[i]]
d['MeanNormalizedError'].append(np.mean(dat))
d['MaxNormalizedError'].append(np.max(dat))
d['Batch'].append(i)
d['Run'].append(run_cnt)
run_cnt += 1
return d
def plot_ts(df, ax, color, yLabel, xLabel, width, label, linestyle = '-', showMax = False, displayCumsum = False):
valid_y1 = []
valid_y2 = []
valid_x = []
line_x = []
line_y = []
single_x = []
single_y = []
count_never = True
max_batch = df[xLabel].max() + 1
min_batch = df[xLabel].min()
steps = []
if width > 0:
steps = range(min_batch, max_batch, width)
else:
steps = [t for t in df[xLabel]]
cumsum = 0
for b in steps:
q = df.query(xLabel + ' >= ' + str(b) + ' and ' + xLabel + ' < ' + str(b + width))
if width == -1:
q = df.query(xLabel + ' == ' + str(b))
mean = q[yLabel].mean()
if displayCumsum:
cumsum = cumsum + mean
mean = cumsum
max = q[yLabel].max()
std = q[yLabel].std()
count = q[yLabel].count()
if count > 1:
if not showMax:
valid_x.append(b)
valid_y1.append(mean - 1.96 * (std / math.sqrt(count)))
valid_y2.append(mean + 1.96 * (std / math.sqrt(count)))
line_y.append(mean)
line_x.append(b)
else:
line_y.append(max)
line_x.append(b)
if not showMax:
ax.fill_between(valid_x, valid_y1, valid_y2, facecolor=color, alpha=0.4, interpolate=True)
ax.plot(line_x, line_y, color=color, linewidth=3, linestyle=linestyle, label=label)#, label=yLabel)
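# Editorial sketch of the 95% confidence band drawn by plot_ts() above
# (normal approximation: mean +/- 1.96 * standard error of the mean).
def _example_ci95(values):
    m = np.mean(values)
    half = 1.96 * (np.std(values, ddof=1) / math.sqrt(len(values)))
    return m - half, m + half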
def getRuntime(fn):
data = json.load(open(fn))
runtimes = []
for run in data['Runs']:
runtimes.append(run['TotalRuntimeInMillis'])
return runtimes
def main(runtimes, id, file_id, title, show):
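# Loads the PrefixSpan ('../n_ps-<id>-20k.json') and ProSecCo ('../n_ips-<id>-10k.json')
# result files for one dataset, records their runtimes, and writes the memory, error,
# and precision/recall figures for that dataset to ../fig/.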
prefixspan = '../n_ps-' + id + '-20k.json'
iprefixspan = '../n_ips-' + id + '-10k.json'
# eval runtime
runtimes_ps = getRuntime(prefixspan)
runtimes_ips = getRuntime(iprefixspan)
print(title, np.mean(runtimes_ps), np.std(runtimes_ps), np.mean(runtimes_ips), np.std(runtimes_ips))
runtimes['prefixspan'][title] = runtimes_ps
runtimes['prosecco'][title] = runtimes_ips
tp = loadTruePositives(prefixspan)
file_id = file_id + '-' + str(tp[0])
# MEMORY
f, (ax1) = plt.subplots(1, 1, sharey=False, figsize=(5, 3.5))
d = loadMemoryData(iprefixspan)
df = pd.DataFrame(data=d)
df = df.sort_values(by=['Time'])
plot_ts(df, ax1, flatui[0], 'Memory', 'Time', -1, 'ProSecCo', linestyle = '-')
d = loadMemoryData(prefixspan)
df = pd.DataFrame(data=d)
df = df.sort_values(by=['Time'])
plot_ts(df, ax1, flatui[1], 'Memory', 'Time', -1, 'PrefixSpan', linestyle = ':')
ax1.set_ylabel('Memory (GB)')
ax1.set_xlabel('Time (mm:ss)')
ax1.xaxis.set_major_formatter(plt.FuncFormatter(minutes_second_formatter))
#if title == 'ACCIDENTS-0.80'
# lgegend = ax1.legend(loc='upper right')
legend = ax1.legend(loc='best')
legend.get_frame().set_facecolor('#ffffff')
legend.get_frame().set_linewidth(0.0)
f.savefig('../fig/' + file_id + '-memory.pdf', bbox_inches='tight')
#f.savefig('../fig/' + file_id + '-memory.svg', bbox_inches='tight')
plt.tight_layout()
if show:
plt.show()
plt.close('all')
# ERROR
f, (ax2) = plt.subplots(1, 1, sharey=False, figsize=(5, 3.5))
d = loadNormalizedErrorData(iprefixspan)
df = pd.DataFrame(data=d)
df = df.sort_values(by=['Batch'])
plot_ts(df, ax2, flatui[0], 'MeanNormalizedError', 'Batch', 1, 'Mean', '-')
plot_ts(df, ax2, flatui[0], 'MaxNormalizedError', 'Batch', 1, 'Max', ':', True)
ax2.set_ylabel('Relative Percentage Error')
ax2.set_xlabel('Block')
legend = ax2.legend(loc='upper right')
legend.get_frame().set_facecolor('#ffffff')
legend.get_frame().set_linewidth(0.0)
f.savefig('../fig/' + file_id + '-error.pdf', bbox_inches='tight')
plt.tight_layout()
if show:
plt.show()
plt.close('all')
# Precision Recall
f, (ax3) = plt.subplots(1, 1, sharey=False, figsize=(5, 3.5))
d = loadPrecisionRecallData(iprefixspan)
df = pd.DataFrame(data=d)
df = df.sort_values(by=['Batch'])
plot_ts(df, ax3, flatui[0], 'Precision', 'Batch', 1, 'Precision')
plot_ts(df, ax3, flatui[0], 'Recall', 'Batch', 1, 'Recall', ':')
ax3.set_ylabel('Precision and Recall')
ax3.set_xlabel('Block')
legend = ax3.legend(loc='lower right')
legend.get_frame().set_facecolor('#ffffff')
legend.get_frame().set_linewidth(0.0)
ax3.set_ylim([0.0, 1.1])
f.savefig('../fig/' + file_id + '-precision_recall.pdf', bbox_inches='tight')
plt.tight_layout()
if show:
plt.show()
plt.close('all')
# RuntimePerBatch
if False:
f, (ax4) = plt.subplots(1, 1, sharey=False, figsize=(5, 3.5))
d = loadRawData(iprefixspan, 'RuntimePerBatch', 'RuntimePerBatch')
df = pd.DataFrame(data=d)
df = df.sort_values(by=['Batch'])
plot_ts(df, ax4, flatui[0], 'RuntimePerBatch', 'Batch', 1, 'ProSecCo', displayCumsum=False, linestyle = '-')
ax4.set_ylabel('Time (s)')
ax4.set_xlabel('Block')
ax4.yaxis.set_major_formatter(plt.FuncFormatter(second_formatter))
legend = ax4.legend(loc='best')
legend.get_frame().set_facecolor('#ffffff')
legend.get_frame().set_linewidth(0.0)
ax4.set_ylim(bottom=0)
f.savefig('../fig/' + file_id + '-batch_runtime.pdf', bbox_inches='tight')
plt.tight_layout()
if show:
plt.show()
plt.close('all')
# ABS-ERROR
f, (ax5) = plt.subplots(1, 1, sharey=False, figsize=(5, 3.5))
d = loadAbsoluteErrorData(iprefixspan)
df = pd.DataFrame(data=d)
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
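# load_data_base is bound to a concrete data series via functools.partial in the tests
# below, so it matches the load_data(observation, data_start, data_end) signature the
# persistence functions expect.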
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
expected = pd.Series([0.2, 0.7, 1.2, 1.6, 2., 2.5], index=expected_index)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0, 0, 0, 20, 20, 20], 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 0, 0, 4, 4, 4], 'y', [50], [2]),
# invalid axis
pytest.param([0, 0, 0, 4, 4, 4], 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic(site_metadata, interval_label, obs_values,
axis, constant_values, expected_values):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='5min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 11 + [4] * 11, 'y', [50], [2]),
# invalid axis
pytest.param([0] * 11 + [4] * 11, 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
# insufficient observation data
pytest.param([5.3, 7.3, 1.4] * 4, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([], 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([None]*10, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic_timeofday(site_metadata, obs_values, axis,
constant_values, expected_values):
tz = 'UTC'
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_end = pd.Timestamp('20190513T0900', tz=tz)
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("data_end,forecast_start", [
# no timezone
(pd.Timestamp("20190513T0900"), pd.Timestamp("20190514T0900")),
# same timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# different timezone
(
pd.Timestamp("20190513T0200", tz="US/Pacific"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# obs timezone, but no fx timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900")
),
# no obs timezone, but fx timezone
(
pd.Timestamp("20190513T0900"),
pd.Timestamp("20190514T0900", tz="UTC")
),
])
def test_persistence_probabilistic_timeofday_timezone(site_metadata, data_end,
forecast_start):
obs_values = [0] * 11 + [20] * 11
axis, constant_values, expected_values = 'x', [10, 20], [50, 100]
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_end = forecast_start + pd.Timedelta("1h")
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
# if forecast without timezone, then use obs timezone
if data.index.tzinfo is not None and forecast_start.tzinfo is None:
expected_index = expected_index.tz_localize(data.index.tzinfo)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 15 + [20] * 15, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 15 + [4] * 15, 'y', [50], [2]),
([None] * 30, 'y', [50], [None]),
([0] * 10 + [None] * 10 + [20] * 10, 'x', [10, 20], [50, 100]),
([0] * 10 + [None] * 10 + [4] * 10, 'y', [50], [2]),
])
def test_persistence_probabilistic_resampling(
site_metadata,
interval_label,
obs_values, axis,
constant_values,
expected_values
):
tz = 'UTC'
interval_length = '1min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
# all observations 9-10 each day.
# This index is for (09:00, 10:00] (interval_label=ending), but subtract
# 30 minutes for [09:00, 10:00) (interval_label=beginning)
PROB_PERS_TOD_OBS_INDEX = pd.DatetimeIndex([
'2019-04-21 09:30:00+00:00', '2019-04-21 10:00:00+00:00',
'2019-04-22 09:30:00+00:00', '2019-04-22 10:00:00+00:00',
'2019-04-23 09:30:00+00:00', '2019-04-23 10:00:00+00:00',
'2019-04-24 09:30:00+00:00', '2019-04-24 10:00:00+00:00',
'2019-04-25 09:30:00+00:00', '2019-04-25 10:00:00+00:00',
'2019-04-26 09:30:00+00:00', '2019-04-26 10:00:00+00:00',
'2019-04-27 09:30:00+00:00', '2019-04-27 10:00:00+00:00',
'2019-04-28 09:30:00+00:00', '2019-04-28 10:00:00+00:00',
'2019-04-29 09:30:00+00:00', '2019-04-29 10:00:00+00:00',
'2019-04-30 09:30:00+00:00', '2019-04-30 10:00:00+00:00',
'2019-05-01 09:30:00+00:00', '2019-05-01 10:00:00+00:00',
'2019-05-02 09:30:00+00:00', '2019-05-02 10:00:00+00:00',
'2019-05-03 09:30:00+00:00', '2019-05-03 10:00:00+00:00',
'2019-05-04 09:30:00+00:00', '2019-05-04 10:00:00+00:00',
'2019-05-05 09:30:00+00:00', '2019-05-05 10:00:00+00:00',
'2019-05-06 09:30:00+00:00', '2019-05-06 10:00:00+00:00',
'2019-05-07 09:30:00+00:00', '2019-05-07 10:00:00+00:00',
'2019-05-08 09:30:00+00:00', '2019-05-08 10:00:00+00:00',
'2019-05-09 09:30:00+00:00', '2019-05-09 10:00:00+00:00',
'2019-05-10 09:30:00+00:00', '2019-05-10 10:00:00+00:00',
'2019-05-11 09:30:00+00:00', '2019-05-11 10:00:00+00:00',
'2019-05-12 09:30:00+00:00', '2019-05-12 10:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
@pytest.mark.parametrize('obs_interval_label_index', [
('beginning', PROB_PERS_TOD_OBS_INDEX - pd.Timedelta('30min')),
('ending', PROB_PERS_TOD_OBS_INDEX)
])
@pytest.mark.parametrize('fx_interval_label_index', [
('beginning', pd.DatetimeIndex(['20190514T0900Z'], freq='1h')),
('ending', pd.DatetimeIndex(['20190514T1000Z'], freq='1h'))
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
# intervals always average to 10 if done properly, but 0 or 20 if
# done improperly
([0, 20] * 22, 'x', [10, 20], [100., 100.]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 4] * 22, 'y', [50], [2.]),
# works with nan
([None, 4] * 22, 'y', [50], [4.]),
([0.] + [None] * 42 + [4.], 'y', [50], [2.]),
# first interval averages to 0, last to 20, else nan
([0.] + [None] * 42 + [20.], 'x', [10, 20], [50., 100.]),
])
def test_persistence_probabilistic_timeofday_resample(
site_metadata,
obs_values,
axis,
constant_values,
expected_values,
obs_interval_label_index,
fx_interval_label_index
):
obs_interval_label, obs_index = obs_interval_label_index
fx_interval_label, fx_index = fx_interval_label_index
tz = 'UTC'
observation = default_observation(
site_metadata,
interval_length='30min',
interval_label=obs_interval_label
)
data_start = pd.Timestamp('20190421T0900', tz=tz)
data_end = pd.Timestamp('20190512T1000', tz=tz)
data = pd.Series(obs_values, index=obs_index, dtype=float)
# -*- coding: utf-8 -*-
"""FINAL PROJECT.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bMDq2WSLnIYa4O1Eq-xw65RoksCOSIh6
# FINAL PROJECT
## A. UNDERSTAND DATA
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
df = pd.read_csv('Data_Negara_HELP.csv')
import pandas
import sqlite3
import datetime
import requests
import zipfile
import io
import subprocess
from fantasy_machine import config
from fantasy_machine import data_ops
class update_data(object):
def __init__(self):
pass
def main(self):
start = datetime.datetime.now()
print('Beginning Update: {}'.format(start.strftime('%Y-%m-%d %H:%M')))
self.update_lineups()
self.update_eventdata()
end = datetime.datetime.now()
return 'Update Completed in {}'.format(end - start)
def update_lineups(self):
# Daily lineup fetch
gl = data_ops.scraping.daily_lineups()
today_date = datetime.datetime.today().strftime('%Y-%m-%d')
pull_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
lineups = pandas.DataFrame.from_dict(gl.main()).drop_duplicates()
# Add dates to dataframe
lineups['game_date'] = today_date
lineups['pull_time'] = pull_time
# Correct Teams
lineups['team'] = lineups['team'].replace('LAA','ANA')
# Push dataframe to lineups database
con = sqlite3.connect(config.players_db_path)
lineups.to_sql(config.lineups_table, con, if_exists='append', index=False)
con.close()
print('Daily Lineup Table Updated')
def update_eventdata(self):
current_year = datetime.datetime.now().strftime('%Y')
self.download_eventfiles(current_year=current_year)
df = self.convert_raw_eventfiles(current_year=current_year)
return len(df)
def convert_raw_eventfiles(self, current_year):
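# Shells out to Retrosheet's BEVENT tool (run under wine) to expand the downloaded
# .EV* event files for the given year into a single merged.csv, then reads that CSV
# back with pandas.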
bash_command = "wine ./data/temp/bevent.exe -y {} -f 0-96 ./data/temp/{}???.EV? > data/temp/merged.csv".format(current_year,
current_year) ## Fix Pathing
subprocess.call(bash_command, shell=True)
df = pandas.read_csv("{}/merged.csv".format(temp_data_dir)) ## Fix Pathing
return df
def download_eventfiles(self, current_year):
base_url = 'https://www.retrosheet.org/events/{current_year}eve.zip'
req_url = base_url.format(current_year=current_year)
r = requests.get(req_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(config.temp_data_dir)
return 0
def check_missing_games(self, current_year):
con = sqlite3.connect(config.mlb_db_path)
query = """SELECT DISTINCT gameid FROM {} WHERE gameid like '%{}%'""".format(config.mlb_event_table,
current_year),
existing_gids = | pandas.read_sql_query(query ,con) | pandas.read_sql_query |
import configparser
import os
from os.path import exists
from datetime import datetime, timedelta
import sys
from time import time
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QProcess, Qt, QThread, QTimer
from mainUi import Ui_MainWindow
from pandascontroller import DomainInput, DomainsTableModel
from apscheduler.schedulers.qt import QtScheduler
class worker(QtCore.QObject): # Worker object for auto scan
startScan = QtCore.pyqtSignal(str, str)
updateConsole = QtCore.pyqtSignal(str)
updateCurrentDay = QtCore.pyqtSignal(int)
autoCheck = QtCore.pyqtSignal(bool)
def __init__(self, parent=None):
super(worker, self).__init__(parent)
def automateScan(self, date):
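# Builds a rotating 4-week x 5-day scanning schedule: each (week, day) slot gets an
# interval job that fires every 28 days starting from its computed date, and the very
# first slot is run immediately when the chosen start date is today.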
scanningSchedule = QtScheduler(timezone="UTC")
days = ["day1", "day2", "day3", "day4", "day5"]
weeks = ["Week 1", "Week 2", "Week 3 ", "Week 4"]
i = 0
date = date.toPyDate()
#print(datetime.utcnow())
startOfWeek = date
for week in weeks:
for day in days:
i += 1
i = str(i)
#print(date, datetime.today().strftime("%Y-%m-%d"))
#calculates when each daily job should be executed and adds to scheduler
if int(i) == 1 and str(date) == datetime.today().strftime("%Y-%m-%d"):
#print("first day")
scanningSchedule.add_job(self.startScan.emit, 'interval', args=[f"{day}", f"{week}"], days=28, start_date=date, next_run_time=datetime.utcnow(), id=i)
else:
scanningSchedule.add_job(self.startScan.emit, 'interval', args=[f"{day}", f"{week}"], days=28, start_date=date, id=i)
date = date + timedelta(days=1)
i=int(i)
if i % 5 == 0:
date = startOfWeek + timedelta(days=7)
startOfWeek = date
scanningSchedule.start()
class window(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
global thread
thread = QtCore.QThread(self)
thread.start()
self.worker = worker()
self.worker.moveToThread(thread)
self.process = QProcess()
#Attempts to read the csv
global domains_path
global config_path
domains_name = 'domains/domains.csv'
config_name = 'shellScripts/wpwatcher.conf'
if getattr(sys,'frozen',False):
application_path = sys._MEIPASS
else:
application_path = os.path.dirname(os.path.abspath(__file__))
domains_path = os.path.join(application_path, domains_name)
config_path = os.path.join(application_path, config_name)
print(domains_path)
try:
self.data = pd.read_csv(domains_path, dtype=object)
except:
# Adds column headers if none detected
columnHeaders = ["day1","day2","day3","day4","day5","day1.1","day2.1","day3.1","day4.1","day5.1","day1.2","day2.2","day3.2","day4.2","day5.2","day1.3","day2.3","day3.3","day4.3","day5.3"]
self.data = pd.DataFrame(None, columns=columnHeaders, dtype=object)
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#-------------------------------------------------------------------------------------------------------------------------------
# By <NAME> (August 2018)
#
# Plot heatmap of gene expression data as environment change from high to low oxygen levels
#
# Dataset: Pseudomonas aeruginosa gene expression compendium referenced in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5069748/
#
# Use map_file to select only those samples from the oxygen level experiment
#
#-------------------------------------------------------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
randomState = 123
from numpy.random import seed
seed(randomState)
# In[3]:
# load arguments
data_file = os.path.join(os.path.dirname(os.getcwd()), "data", "all-pseudomonas-gene-normalized.zip") # repo file is zipped
map_file = os.path.join(os.path.dirname(os.getcwd()), "metadata", "mapping_oxy.txt")
PA1673like_file = os.path.join(os.path.dirname(os.getcwd()), "output", "PA1673_like_genes_v1.txt")
# In[4]:
# read in data
data = pd.read_table(data_file, header=0, sep='\t', index_col=0, compression='zip')
X = data.transpose()
X.head(5)
# In[5]:
# read in metadata file containing sample ids for dataset to consider (i.e. oxygen level experiment: E-GEOD-52445)
grp = pd.read_table(map_file, header=0, sep='\t', index_col=None)
grp
# In[6]:
# select only those rows the experiment under focus
# ordering based on timecourse experiment (high oxygen --> low oxygen)
timeline = ['maxO2', 't5', 't10', 't15', 't20', 't25', 't30', 't35', 't40', 't50', 't60', 't70', 't80', 'minO2']
dataset = pd.DataFrame()
for index, row in grp.iterrows():
if row['Phenotype'] == timeline[index]:
sample = str(row['Sample ID'])
dataset = dataset.append(X[X.index.str.contains(sample, regex=False)])
dataset = dataset.T
dataset.shape
# In[7]:
# Heat map of all genes
plt.figure(figsize=(10, 50))
sns.heatmap(dataset[4000:5000], yticklabels=False, cmap="RdBu_r", annot=False)
#sns.clustermap(dataset)
# In[8]:
# Heat map of gene PA1673 (known to be solely regulated by Anr TF so expect it should be
# linearly turned on as Anr turns on when oxygen levels decrease)
#sns.heatmap(dataset['PA1673'], annot=True)
plt.figure(figsize=(10, 1))
PA1673_exp = dataset[dataset.index == 'PA1673']
sns.heatmap(PA1673_exp, annot = True, cmap = "RdBu_r")
# In[9]:
# Use pearson correlation score to compare PA1673 profile with all other genes
# Select genes that have the highest 95% Pearson correlation score as being "PA1673-like"
corr_score = []
ref_gene = np.reshape(PA1673_exp.values, (PA1673_exp.shape[1],))
for i in range(0,dataset.shape[0]):
corr_score.append(pearsonr(ref_gene, dataset.iloc[i].values))
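# Each entry returned by scipy.stats.pearsonr is an (r, p-value) pair, which feeds the
# 'Pearson' and 'Pvalue' columns below.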
corr_score_df = pd.DataFrame(corr_score, index=dataset.index, columns=['Pearson', 'Pvalue'])
from pathlib import Path
from matplotlib.font_manager import FontProperties
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandpadir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandpadir)
from models.OutlierDetector import Detector
from baselines.posthoc_explanation_methods import ExplanationMethods
from configpkg import ConfigMger, DatasetConfig
from holders.Dataset import Dataset
from pipeline.automl.automl_processor import AutoML
from utils import metrics
from utils.helper_functions import read_nav_files, sort_files_by_dim
from utils.pseudo_samples import PseudoSamplesMger
from utils.shared_names import FileKeys, FileNames
from analysis.comparison.comparison_utils import load_baseline_explanations, get_dataset_name
from collections import OrderedDict
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
pipeline = 'results_predictive_grouping'
expl_size = 10
noise_level = None
fs_methods = 5
datasets = {
'wbc': {'name': 'Wisconsin Breast Cancer', 'dims': ['30d (0%)', '42d (30%)', '75d (60%)', '300d (90%)']},
'ionosphere': {'name': 'Ionosphere', 'dims': ['33d (0%)', '47d (30%)', '82d (60%)', '330d (90%)']},
'arrhythmia': {'name': 'Arrhythmia', 'dims': ['257d (0%)', '367d (30%)', '642d (60%)', '2570d (90%)']}
}
test_confs = [
{'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'test'},
{'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'test'},
{'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'test'}
]
real_confs = [
{'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'real'},
{'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'real'},
{'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'real'}
]
synth_confs =[
{'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
def analyze_explanation_size():
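# Lays out a 2x2 grid of AUC-versus-dimensionality plots ('dim-auc.png'): real and
# synthetic test performances are accumulated over the three detector configurations
# and divided by 3 further down to obtain per-dataset averages before plotting.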
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 7), sharey=True)
real_test_perfs = real_perfs()
synth_test_perfs = synth_perfs()
perfs_total = {**synth_test_perfs, **real_test_perfs}
min_perf = 0.2 # min([df.min().min() for df in pred_perfs_dict.values()]) / 3
i, j = 0, 0
for dname, df in perfs_total.items():
if j == 2:
j = 0
i += 1
if dname != 'Synthetic':
df.index = datasets[dname]['dims']
dname = datasets[dname]['name']
df /= 3
plot_datasets_perfs(axes[i, j], df, dname, min_perf)
j += 1
handles, labels = axes[0, 1].get_legend_handles_labels()
# fig.legend(handles, labels, loc='upper center', ncol=3, fontsize=13)
plt.subplots_adjust(wspace=.15, hspace=.5, top=.82)
#plt.tight_layout()
output_folder = Path('..', 'figures', 'results')
output_folder.mkdir(parents=True, exist_ok=True)
plt.savefig(Path(output_folder, 'dim-auc.png'), dpi=300, bbox_inches='tight', pad_inches=0)
plt.clf()
def real_perfs():
pred_perfs_dict = {}
for conf in real_confs:
print(conf)
nav_files_json = sort_files_by_dim(read_nav_files(conf['path'], conf['type']))
for dim, nav_file in nav_files_json.items():
real_dims = dim - 1
dname = get_dataset_name(nav_file[FileKeys.navigator_original_dataset_path], conf['type'] != 'real')
if dname not in datasets:
continue
print(dname + ' ' + str(real_dims) + 'd')
info_dict_proteus = read_proteus_files(nav_file)
info_dict_baselines = read_baseline_files(nav_file)
perfs_test = methods_effectiveness(nav_file, info_dict_proteus, info_dict_baselines, in_sample=False)
if perfs_test.shape[1] < fs_methods:
loda = pd.DataFrame(np.full(perfs_test.shape[0], 1), index=perfs_test.index, columns=['loda'])
perfs_test = pd.concat([perfs_test, loda], axis=1)
if dname not in pred_perfs_dict:
pred_perfs_dict[dname] = perfs_test
else:
pred_perfs_dict[dname] += perfs_test
return pred_perfs_dict
def synth_perfs():
pred_perfs_dict = {}
for conf in synth_confs:
print(conf)
perfs_test = best_models(conf)
if perfs_test.shape[1] < fs_methods:
loda = pd.DataFrame(np.full(perfs_test.shape[0], 1), index=perfs_test.index, columns=['PROTEUS_${loda}$'])
perfs_test = pd.concat([perfs_test, loda], axis=1)
if 'Synthetic' not in pred_perfs_dict:
pred_perfs_dict['Synthetic'] = perfs_test
else:
vals = pred_perfs_dict['Synthetic'].values + perfs_test.values
pred_perfs_dict['Synthetic'] = pd.DataFrame(vals, index=perfs_test.index, columns=perfs_test.columns)
return pred_perfs_dict
def best_models(conf):
best_models_perf_in_sample = pd.DataFrame()
import os
from datetime import date
from altair_saver import save
import altair as alt
import pandas as pd
import modules.c19api as c19api
def tested():
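# Renders a layered Altair chart: bars for the daily number of negative/positive lab
# tests and a red line for the share of positives on an independent y-axis, then saves
# the figure as PNG and returns the open file handle.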
filename = "./graphs/no_tested.png"
if os.path.exists(filename):
os.remove(filename)
data = c19api.timeseries("tested_lab")
df = pd.DataFrame(data)
mapping = {
"new_neg": "Nye (Negative)",
"new_pos": "Nye (Positive)",
"new_total": "Nye",
"pr100_pos": "Andel Positive",
"total": "Akkumulert",
}
df = df.rename(columns=mapping)
df["date"] = pd.to_datetime(df["date"])
df["Andel Negative"] = 100 - df["Andel Positive"]
df = df.melt(
id_vars=["date", "Andel Positive"], var_name="category", value_name="value"
)
base = alt.Chart(
df,
title="Antall personer testet for covid-19 per dag og andel positive blant disse (Kilde: FHI)",
).encode(alt.X("yearmonthdate(date):O", axis=alt.Axis(title=None, labelAngle=-40)))
andel = base.mark_line(color="red", opacity=0.8).encode(
y=alt.Y("Andel Positive:Q", title="% Positive", axis=alt.Axis(grid=True))
)
bar = (
base.transform_filter(
(alt.datum.category == "Nye (Negative)")
| (alt.datum.category == "Nye (Positive)")
)
.mark_bar()
.encode(
y=alt.Y("value:Q", title="Antall personer testet for covid-19 per dag"),
color=alt.Color(
"category:N",
scale=alt.Scale(
domain=["Nye (Positive)", "Nye (Negative)", "% Positive"],
range=["#FF9622", "#6DA9FF", "red"],
),
legend=alt.Legend(title=None),
),
)
)
chart = (
alt.layer(bar, andel)
.resolve_scale(y="independent")
.properties(width=1200, height=600)
.configure_legend(
strokeColor="gray",
fillColor="#FFFFFF",
labelFontSize=12,
symbolStrokeWidth=2,
symbolSize=160,
padding=6,
cornerRadius=5,
direction="horizontal",
orient="none",
legendX=480,
legendY=660,
)
)
save(chart, filename)
return open(filename, "rb")
def confirmed():
data = c19api.timeseries("confirmed")
filename = "./graphs/no_confirmed.png"
if os.path.exists(filename):
os.remove(filename)
df = pd.DataFrame(data)
df["date"] = | pd.to_datetime(df["date"]) | pandas.to_datetime |
# Parses a GL5 file and extracts raw data and event information.
import pickle
import sys
import numpy as np
import os
import time
from legacy_codes import *
from message_codes import *
import datetime
import pandas as pd
from Models import *
def typecast(value, dtype):
value = np.array(value)
b = value.tobytes()
return np.frombuffer(b, dtype=dtype)
def typecast_arr(arr, dtype):
value = np.array(arr)
b = value.tobytes()
result = np.frombuffer(b, dtype=dtype)
return result
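# typecast/typecast_arr reinterpret the raw bytes of a scalar or array as another dtype
# (e.g. a uint32 word read from the file viewed as np.single) without numeric
# conversion, in the spirit of MATLAB's typecast.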
def count_set_bits(n):
n = typecast(n, np.uint16)[0]
count = 0
while n:
count += (n & 1)
n >>= 1
return count
def extract_data(input_file_name: str, create_output_file: bool, output_folder: str):
# Parameters
skip_mer = 0 # does not extract MER data.
skip_lf = 0 # does not extract LF data.
gvars = []
# TODO: split_records
split_records = 1 # If set to 1, the records will be split when ADS_EVENT_START_REC event is found
mer_split_found = 0
lfp_split_found = 0
nStimindex=1
nPulseindex=1
nSegment = -1
t = T()
v_cal_mer = np.ones((2,8)) # MER voltage calibration - two interfaces.
v_cal_lfs = np.ones((2,8)) # LFP voltage calibration - two interfaces.
v_cal_aux = np.ones((1,2)) # voltage calibration - sync interface
gain = 12 * 5.92 * 2.82
voltage_calibration = (4/(2^23-1))/ gain
last_ts = 0
last_length = 0
last_raw_ts = 0
last_aux_ts = 0
last_lfp_ts = 0
last_lfp_len = 0
last_aux_len = 0
fp = open(input_file_name, 'rb')
stat = os.stat(input_file_name)
file_size = stat.st_size # returns size in bytes
data_size = file_size/4
num_channels = 8
ts = []
ts_lf = []
ch_index = np.ones((1, num_channels))
ch_index_lf = np.ones((1, num_channels))
ch_index_motion = np.ones((1,4)) # 3-axis accelerometer
ch_index_aux = np.ones((1,num_channels))
num_msgs = 1
mer_channel_list = []
mer_channel_map = 0
lf_channel_list = []
lf_channel_map = 0
file_data = np.frombuffer(fp.read(), dtype='uint32')
fp.close()
timeStart = time.time()
index: int = 0
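# Main parse loop: scan the 32-bit word stream for a SYNC marker, read the message code
# and payload length that follow, then dispatch on the code. Data messages carry a
# channel bitmap and a timestamp before the per-channel samples; event messages carry
# their own fixed fields.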
while index < len(file_data):
word = file_data[index]
index += 1
if word == SYNC:
msg_code = int(file_data[index])
index += 1
msg_len = file_data[index]
index += 1
if msg_code == ADS_DATA_MER:
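# MER payload layout: one channel-bitmap word (sequence number packed in the top
# 4 bits), one timestamp word, then (msg_len - 2) / num_channels samples for every
# channel set in the bitmap.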
num_msgs = num_msgs + 1
if skip_mer == 1:
index += msg_len
continue
msg_bitmap = file_data[index]
index += 1
msg_timestamp = file_data[index]
index += 1
num_channels = count_set_bits(msg_bitmap)
num_samples = int((msg_len-2)/ num_channels)
msg_seq = ( msg_bitmap & int( 'F0000000', 16)) >> 28
msg_seq += 1
if last_ts > 0:
# if msg_timestamp - last_ts > last_length:
# fprintf( '\nMER. data loss, expected: %d, found: %d', last_ts + last_length, msg_timestamp )
#
# if msg_timestamp < last_ts:
# fprintf( '\nMER. restart record at : %d, from %d', msg_timestamp, last_ts )
last_ts = msg_timestamp
last_length = num_samples
else:
last_ts = msg_timestamp
last_length = num_samples
if t.segments[nSegment].start_timestamp_mer == 0:
t.segments[nSegment].start_timestamp_mer = msg_timestamp
if mer_channel_map != msg_bitmap:
mer_channel_list = []
for k in range(0,8):
if 0 != ( msg_bitmap & (1<<k)):
mer_channel_list.append(k+1)
for i in range(0,num_channels):
channel_index = mer_channel_list[i]
if channel_index not in t.segments[nSegment].channels:
t.segments[nSegment].channels[channel_index] = Channel()
mer_channel_map = msg_bitmap
if split_records == 1:
if mer_split_found == 1:
for i in range(0,num_channels):
channel_index = mer_channel_list[i]
ch_index[0][channel_index-1] = 1
try:
if index + (msg_len-2) <= data_size:
for i in range(0,num_channels ):
channel_index = mer_channel_list[i]
m_data = np.array( file_data[index: (index + int(num_samples))] ).astype('int32')
m_data = np.multiply(m_data, t.segments[nSegment].v_cal_mer[channel_index] );
index += num_samples
if channel_index not in t.segments[nSegment].channels:
t.segments[nSegment].channels[channel_index] = Channel()
t.segments[nSegment].channels[channel_index].continuous[int(ch_index[0][channel_index -1 ]) : int((ch_index[0][channel_index - 1]) + len(m_data))] = m_data
ch_index[0][channel_index - 1] = ch_index[0][channel_index - 1] + len( m_data )
if mer_split_found == 1:
mer_split_found = 0
except Exception as err:
print( F'\nError at index {index}\n' )
raise err
elif msg_code == LFP_DATA_RAW:
if skip_lf == 1:
index += msg_len
continue
msg_bitmap = file_data[index]
index += 1
msg_timestamp = file_data[index]
index += 1
num_channels = count_set_bits(msg_bitmap)
num_samples = int((msg_len-2)/ num_channels)
#fprintf( '\nmsg_bitmap = %x', msg_bitmap )
msg_seq = ( msg_bitmap & int( 'F0000000', 16)) >> 28
msg_seq = msg_seq + 1
if lf_channel_map != msg_bitmap:
lf_channel_list = []
for k in range(0, 8):
if 0 != ( msg_bitmap & 1 << k):
lf_channel_list.append(k+1)
for i in range(0,num_channels):
channel_index = lf_channel_list[i]
if channel_index not in t.segments[nSegment].channels:
t.segments[nSegment].channels[channel_index] = Channel()
lf_channel_map = msg_bitmap
if split_records == 1:
if lfp_split_found == 1:
for i in range(0,num_channels):
channel_index = lf_channel_list[i]
ch_index_lf[0][channel_index - 1] = 1
if last_lfp_ts > 0:
# if msg_timestamp - last_lfp_ts > float(( float(t.segments[nSegment].sampling_rate_mer)/float(t.segments[nSegment].sampling_rate_lf))*last_lfp_len):
# fprintf( '\nLFP data loss, expected: %d, found: %d, dif = %d', ...
# last_lfp_ts + 32 * last_lfp_len, msg_timestamp, (last_ts + 32 * last_lfp_len) - msg_timestamp )
# else:
# fprintf( '\nLFP timestamp OK: last = %d, new: %d', last_ts, msg_timestamp)
last_lfp_ts = msg_timestamp
last_lfp_len = num_samples
else:
#fprintf( '\nfirst_lfp_ts = %d', msg_timestamp )
last_lfp_ts = msg_timestamp
last_lfp_len = num_samples
ts_lf.append(msg_timestamp)
if t.segments[nSegment].start_timestamp_lf == 0:
t.segments[nSegment].start_timestamp_lf = msg_timestamp
try:
if index + (msg_len-2) <= data_size:
for i in range(0,num_channels):
channel_index = lf_channel_list[i]
m_data = np.array( file_data[index: (index + int(num_samples))] ).astype('int32')
m_data = np.multiply(m_data, t.segments[nSegment].v_cal_mer[channel_index] );
index += num_samples
if channel_index not in t.segments[nSegment].channels:
t.segments[nSegment].channels[channel_index] = Channel()
t.segments[nSegment].channels[channel_index].lf[int(ch_index_lf[0][channel_index - 1]) : int(ch_index_lf[0][channel_index - 1] + len(m_data) - 1) ] = m_data
ch_index_lf[0][channel_index - 1] = ch_index_lf[0][channel_index - 1] + len( m_data )
if lfp_split_found == 1:
lfp_split_found = 0
except Exception as err:
print( F'\nError at index {index}\n')
raise err
elif msg_code == MOTION_DATA_MESSAGE:
msg_bitmap = file_data[index]
index += 1
msg_timestamp = file_data[index]
index += 1
num_channels = count_set_bits(msg_bitmap)
num_samples = int((msg_len-2)/(num_channels+1))
#fprintf( '\nmsg_bitmap = %x', msg_bitmap )
msg_seq = ( msg_bitmap & int( 'F0000000', 16)) >> 28
msg_seq = msg_seq + 1
if t.segments[nSegment].motion.start_timestamp == 0:
t.segments[nSegment].motion.start_timestamp = last_ts
try:
if index + (msg_len-2) <= data_size:
if num_channels == 4:
for i in range(1, num_channels + 1):  # 1 = timestamps, 2 = X, 3 = Y, 4 = Z
if i==1:
for k in range(0, int(num_samples)):
d1 = file_data[index].astype(np.int64)
index = index+1
d2= file_data[index]
index = index+1
# The data is stored in little endian format.
# Must confirm that this holds on other machines if the file
# format is preserved.
start_date = ((d2.astype(np.int64) << 32) | d1.astype(np.int64)).astype(np.int64)
t.segments[nSegment].motion.data.timestamps[int(ch_index_motion[0][i - 1])] = start_date
ch_index_motion[0][i - 1] = ch_index_motion[0][i - 1] + 1
elif i==2:
m_data = [typecast(x,np.single) for x in file_data[index: (index + num_samples)]]
t.segments[nSegment].motion.data.X[int(ch_index_motion[0][i - 1]): int(ch_index_motion[0][i - 1] + len(m_data))] = m_data
ch_index_motion[0][i - 1] = ch_index_motion[0][i - 1] + len(m_data)
index += num_samples
elif i==3:
m_data = [typecast(x,np.single) for x in file_data[index: (index + num_samples)]]
t.segments[nSegment].motion.data.Y[int(ch_index_motion[0][i - 1]): int(ch_index_motion[0][i - 1] + len(m_data))] = m_data
ch_index_motion[0][i - 1] = ch_index_motion[0][i - 1] + len(m_data)
index += num_samples
elif i==4:
m_data = [typecast(x,np.single) for x in file_data[index: (index + num_samples)]]
t.segments[nSegment].motion.data.Z[int(ch_index_motion[0][i - 1]): int(ch_index_motion[0][i - 1] + len(m_data))] = m_data
ch_index_motion[0][i - 1] = ch_index_motion[0][i - 1] + len(m_data)
index += num_samples
except Exception as err:
print( F'\nError at index {index}\n')
raise err
elif msg_code == SYNC_INT_INPUT_DATA:
msg_bitmap = file_data[index]
index += 1
msg_timestamp = file_data[index]
index += 1
msg_seq = ( msg_bitmap & int( 'F0000000', 16)) >> 28
msg_seq = msg_seq + 1
num_channels = count_set_bits(msg_bitmap)
num_samples = int((msg_len-2)/ num_channels)
#fprintf( '\nmsg_bitmap = %x', msg_bitmap )
if last_aux_ts > 0:
# if msg_timestamp - last_aux_ts > (last_aux_len*4):
# fprintf( '\nAUX data loss, expected: %d, found: %d', last_aux_ts + last_aux_len*4, msg_timestamp )
#
# ts = [ ts msg_timestamp ]
last_aux_ts = msg_timestamp
#fprintf( '\nlast_ts = %d', msg_timestamp )
else:
print( F'\nfirst aux ts = {msg_timestamp}')
last_aux_ts = msg_timestamp
last_aux_len = msg_len
#fprintf( '\nnum_channels = %d', num_channels )
try:
for i in range(0,num_channels):
if index + num_samples <= data_size:
m_data = typecast_arr(file_data[index: (index + num_samples)], np.single)
index += num_samples
#m_data = voltage_calibration .* m_data
# %if( isfield( t(i).channels, 'continuous' ) )
# %t(i).channels.continuous = cat(1,t(i).channels.continuous, m_data)
if i not in t.segments[nSegment].aux.channels:
t.segments[nSegment].aux.channels[i] = Channel()
t.segments[nSegment].aux.channels[i].continuous[int(ch_index_aux[0][i]) : int(ch_index_aux[0][i] + len(m_data))] = m_data
ch_index_aux[0][i] = ch_index_aux[0][i] + len( m_data )
# %else
# % t(i).channels.continuous = m_data
t.segments[nSegment].aux.timestamps_aux.append(msg_timestamp)
except Exception as err:
print( F'\nError at index {index}\n' )
raise err
elif msg_code == MOTION_SENSOR_EVENT_SAMPLING_RATE:
sampling_rate = file_data[index]
index += 1
t.segments[nSegment].motion.sampling_rate = sampling_rate
elif msg_code == SYNC_INT_EVENT_DIG_INPUT:
deviceId = file_data[index]
index += 1
timestamp = file_data[index]
index += 1
port0 = file_data[index]
index += 1
port1 = file_data[index]
index += 1
port5 = file_data[index]
port5 = port5 & 1
index += 1
t.segments[nSegment].sync.timestamps.append(timestamp)
t.segments[nSegment].sync.port1.append(port0)
t.segments[nSegment].sync.port2.append(port1)
t.segments[nSegment].sync.digin.append(port5)
elif msg_code == SYNC_INT_EVENT_REALTIME_DIG_INPUT:
deviceId = file_data[index]
index += 1
ntimestamps = msg_len -1
for i in range(0,ntimestamps):
timestamp = file_data[index]
index += 1
t.segments[nSegment].sync.rt_timestamps.append(timestamp)
elif msg_code == SYNC_INT_EVENT_SAMPLING_RATE:
sampling_rate = file_data[index]
index += 1
t.segments[nSegment].aux.sampling_rate = sampling_rate
elif msg_code == STIM_EVENT_STIM_ON:
deviceId =file_data[index]
index += 1
stim_on_ts = file_data[index]
index += 1
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].stim_on = stim_on_ts
print( F'\nStim On TS = {stim_on_ts}' )
print( F'\nStim index TS = {nStimindex}' )
elif msg_code == STIM_EVENT_STIM_OFF:
deviceId =file_data[index]
index += 1
stim_off_ts = file_data[index]
index += 1
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].stim_off = stim_off_ts
nStimindex=nStimindex+1
nPulseindex=1
print( F'\nStim Off TS = {stim_off_ts}')
elif msg_code == STIM_EVENT_STIM_TYPE:
device_id = file_data[index]
index += 1
stim_type = file_data[index] # 0 - microstimulation, 1 - macrostimulation
index += 1
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].stim_type = stim_type
print(F'\nDevice {device_id}: stimulation type: 0x{stim_type}')
elif msg_code == STIM_EVENT_OPERATING_MODE:
device_id = file_data[index]
index += 1
stim_mode = file_data[index] # 0 - Constant Current, 1 - Constant Voltage
index += 1
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].stim_mode = stim_mode
print( 1, F'\nDevice {device_id}: stimulation type: 0x{stim_mode}')
elif msg_code == STIM_EVENT_CUSTOM_WAVEFORM:
waveform_id = []
for i in range(1,4):
waveform_id.append((int(file_data[index]) & int('FF000000', 16)) >> 24)
waveform_id.append((int(file_data[index]) & int('00FF0000', 16)) >> 16)
waveform_id.append((int(file_data[index]) & int('0000FF00', 16)) >> 8)
waveform_id.append(int(file_data[index]) & int('000000FF', 16))
index += 1
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].identifier = waveform_id
print(F'\nWaveId = {waveform_id}')
elif msg_code == STIM_EVENT_PULSE_FREQUENCY:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]+1
frequency = mdata[2]
#t.segments[nSegment](nSegment).stim_params(nStimindex).pulse_freq = frequency
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
if nPulseindex in t.segments[nSegment].stim_params[nStimindex].pulse:
if t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].freq != 0:
nPulseindex += 1
if nPulseindex not in t.segments[nSegment].stim_params[nStimindex].pulse:
t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex] = Pulse()
t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].freq = frequency
t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].channel=output_channel
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation frequency: {frequency}')
elif msg_code== STIM_EVENT_PULSE_AMPLITUDE:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]+1
amplitude = typecast(mdata[2], np.single)[0]
#t.segments[nSegment](nSegment).stim_params(nStimindex).pulse_amplitude = amplitude
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
                if nPulseindex not in t.segments[nSegment].stim_params[nStimindex].pulse:
                    t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex] = Pulse()
                t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].amplitude = amplitude
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation pulse amplitude: {amplitude}')
elif msg_code == STIM_EVENT_PULSE_DURATION:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]+1
duration = mdata[2]
#t.segments[nSegment](nSegment).stim_params(nStimindex).pulse_duration = duration
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
                if nPulseindex not in t.segments[nSegment].stim_params[nStimindex].pulse:
                    t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex] = Pulse()
                t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].duration = duration
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation pulse duration (ticks): {duration}')
elif msg_code == STIM_EVENT_PULSE_PHASE:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]+1
phase = mdata[2] # 0 - monophasic, 1-biphasic
#t.segments[nSegment](nSegment).stim_params(nStimindex).pulse_phase = phase
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
                if nPulseindex not in t.segments[nSegment].stim_params[nStimindex].pulse:
                    t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex] = Pulse()
                t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].phase = phase
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation pulse phase: {phase}')
elif msg_code == STIM_EVENT_PULSE_POLARITY:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]+1
polarity = mdata[2] # 0 - negative, 1 - positive
#t.segments[nSegment](nSegment).stim_params(nStimindex).pulse_polarity = polarity
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
                if nPulseindex not in t.segments[nSegment].stim_params[nStimindex].pulse:
                    t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex] = Pulse()
                t.segments[nSegment].stim_params[nStimindex].pulse[nPulseindex].polarity = polarity
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation pulse polarity: {polarity}')
elif msg_code == STIM_EVENT_OUTPUT_CHANNELS:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]
channel_map = mdata[2]
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].output_channel_map = channel_map
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation output channels (hex map): 0x{int(channel_map):X}')
elif msg_code == STIM_EVENT_RETURN_CHANNELS:
mdata = file_data[index: (index + msg_len)]
index += msg_len
device_id = mdata[0]
output_channel = mdata[1]
channel_map = mdata[2]
if nStimindex not in t.segments[nSegment].stim_params:
t.segments[nSegment].stim_params[nStimindex] = StimParams()
t.segments[nSegment].stim_params[nStimindex].return_channel_map = channel_map
                print(F'\nDevice {device_id}: channel {output_channel}, stimulation return channels (hex map): 0x{int(channel_map):X}')
elif msg_code == ADS_EVENT_CLASSIFICATION:
mdata = file_data[index: (index + msg_len)]
index += msg_len
                position = mdata[0:2] # 64-bit position in file (low and high words).
channel = mdata[2] # channel number
guid = mdata[3:] # event guid.
#fdisp(1, '\nEvent at channel %d, ts = %d', channel, last_ts)
elif msg_code == HMS_EVENT_BOARD_TYPE:
m_board_type = file_data[index]
index += 1
m_device_id = file_data[index]
index += 1
t.device.board_type = m_board_type
                print(F'\nReceived board type = {m_board_type}, device id = {m_device_id}')
# ---------------------------------------------------------
elif msg_code == HMS_EVENT_MER_VOLTAGE_CALIBRATION:
m_channel = file_data[index]
index += 1
m_device_id = file_data[index]
index += 1
m_vcal = typecast(file_data[index], np.single)[0]
index += 1
offset_dc = typecast(file_data[index], np.int32)[0]
index += 1
offset_ac = typecast(file_data[index], np.int32)[0]
index += 1
#fprintf(1, '\nMER voltage calibration on device %d, channel %d = %f', ...
# m_device_id, m_channel, m_vcal)
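                # The calibration tables use 1-based channel slots (m_channel + 1),
                # apparently carried over from the MATLAB reader this code was ported from.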
if m_device_id >= 0 and m_device_id < len(v_cal_mer):
if m_channel >= 0 and m_channel < len(v_cal_mer[0]):
t.segments[nSegment].v_cal_mer[m_channel+1] = m_vcal
t.segments[nSegment].offset_dc_mer[m_channel+1] = offset_dc
t.segments[nSegment].offset_ac_mer[m_channel+1] = offset_ac
                    else:
                        print(F'\nMER channel out of range {m_channel}')
                else:
                    print(F'\nMER device id out of range {m_device_id}')
# ---------------------------------------------------------
elif msg_code == HMS_EVENT_LFP_VOLTAGE_CALIBRATION:
m_channel = file_data[index]
index += 1
m_device_id = file_data[index]
index += 1
m_vcal = typecast(file_data[index], np.single)[0]
index += 1
offset_dc = typecast(file_data[index], np.int32)[0]
index += 1
#fprintf(1, '\nLFP voltage calibration on device %d, channel %d = %f', ...
# m_device_id, m_channel, m_vcal)
                if m_device_id >= 0 and m_device_id < len(v_cal_lfs):
                    if m_channel >= 0 and m_channel < len(v_cal_lfs[0]):
                        t.segments[nSegment].v_cal_lf[m_channel+1] = m_vcal
                        t.segments[nSegment].offset_dc_lfs[m_channel+1] = offset_dc
                    else:
                        print(F'\nLF channel out of range {m_channel}')
                else:
                    print(F'\nLF device id out of range {m_device_id}')
elif msg_code == EVENT_FILE_HEADER:
file_type = file_data[index]
index += 1
version = file_data[index]
index += 1
                deviceId = file_data[index]
                index += 1
                #start_date = int64(0)
                d1 = file_data[index]
                index += 1
                d2 = file_data[index]
                index += 1
# The data is stored in little endian format.
# Must confirm that this holds on other machines if the file
# format is preserved.
                start_date = (d2.astype(np.int64) << 32) | d1.astype(np.int64)
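                # Comment-only sanity check (assuming d1 holds the low and d2 the high
                # 32-bit word as read from the file): struct.unpack('<q',
                # struct.pack('<II', int(d1), int(d2)))[0] gives the same value on any
                # host byte order.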
                index1 = file_data[index]
                index += 1
                index2 = file_data[index]
                index += 1
#index = int( (int(index2) << 32)| int(index1))
# d = datenum(start_date)
#fdisp('\nfileType = 0x%x, version = %d, device = %d', file_type, version, deviceId)
t.file_info.version = version
t.file_info.deviceId = deviceId
                t.file_info.start_time = pd.to_datetime(start_date, unit='ns')
#!/usr/bin/env python3
"""
Combine FoldX AnalyseComplex output from many complexes
"""
import sys
import argparse
import pandas as pd
from pathlib import Path
def import_complex_dir(path):
"""
Import tables from an AnalyseComplex output directory
"""
path = path.rstrip('/')
interactions = pd.read_csv(f'{path}/interactions.tsv', sep='\t')
interactions = interactions.rename({'interface_residues': 'number_of_interface_residues'},
axis='columns')
interface = pd.read_csv(f'{path}/interface_residues.tsv', sep='\t')
comb = pd.merge(interactions, interface, how='outer', on=['chain', 'position', 'wt', 'mut'])
comb['complex'] = path.split("/")[-2]
comb['interface'] = path.split("/")[-1]
cols = ['complex', 'interface', 'chain', 'position', 'wt', 'mut']
    comb = comb[cols + [c for c in comb.columns if c not in cols]]
return comb
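# Example (hypothetical layout): import_complex_dir('results/complex1/A_B') returns one
# row per interface residue/mutation, tagged with complex='complex1' and
# interface='A_B' taken from the path.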
def main(args):
"""Main"""
complex_dfs = [import_complex_dir(path) for path in args.dir]
    complexes = pd.concat(complex_dfs)
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
from datetime import datetime
from typing import cast, List
from unittest import TestCase
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import pytz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from kats.compat.pandas import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from kats.consts import (
DEFAULT_TIME_NAME,
DEFAULT_VALUE_NAME,
TimeSeriesData,
TSIterator,
)
def load_data(file_name: str) -> pd.DataFrame:
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
# pyre-fixme[6]: For 1st param expected `bytes` but got `Optional[bytes]`.
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
TIME_COL_NAME = "ds"
VALUE_COL_NAME = "y"
MULTIVAR_VALUE_DF_COLS: List[str] = [VALUE_COL_NAME, VALUE_COL_NAME + "_1"]
EMPTY_DF = pd.DataFrame()
EMPTY_TIME_SERIES = pd.Series([], name=DEFAULT_TIME_NAME, dtype=float)
EMPTY_VALUE_SERIES = pd.Series([], name=DEFAULT_VALUE_NAME, dtype=float)
EMPTY_VALUE_SERIES_NO_NAME = pd.Series([], dtype=float)
EMPTY_TIME_DATETIME_INDEX = pd.DatetimeIndex(pd.Series([], dtype=object))
EMPTY_DF_WITH_COLS: pd.DataFrame = pd.concat([EMPTY_TIME_SERIES, EMPTY_VALUE_SERIES], axis=1)
NUM_YEARS_OFFSET = 12
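# Offset (in years) used in the ops tests below to build date-shifted copies of the air
# passengers data that do not overlap with the original 1949-1960 series.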
class TimeSeriesBaseTest(TestCase):
def setUp(self) -> None:
# load Dataframes for testing
self.AIR_DF = load_data("air_passengers.csv")
self.AIR_DF_DATETIME = self.AIR_DF.copy(deep=True)
self.AIR_DF_DATETIME.ds = self.AIR_DF_DATETIME.ds.apply(
lambda x: parser.parse(x)
)
self.AIR_DF_UNIXTIME = self.AIR_DF.copy(deep=True)
self.AIR_DF_UNIXTIME.ds = self.AIR_DF_DATETIME.ds.apply(
lambda x: (x - datetime(1970, 1, 1)).total_seconds()
)
self.AIR_DF_WITH_DEFAULT_NAMES = self.AIR_DF.copy(deep=True)
self.AIR_DF_WITH_DEFAULT_NAMES.columns = [DEFAULT_TIME_NAME, DEFAULT_VALUE_NAME]
self.MULTIVAR_AIR_DF = self.AIR_DF.copy(deep=True)
self.MULTIVAR_AIR_DF[VALUE_COL_NAME + "_1"] = self.MULTIVAR_AIR_DF.y * 2
self.MULTIVAR_AIR_DF_DATETIME = self.MULTIVAR_AIR_DF.copy(deep=True)
self.MULTIVAR_AIR_DF_DATETIME.ds = self.MULTIVAR_AIR_DF_DATETIME.ds.apply(
lambda x: parser.parse(x)
)
self.MULTIVAR_VALUE_DF = self.MULTIVAR_AIR_DF[MULTIVAR_VALUE_DF_COLS]
self.AIR_TIME_SERIES = self.AIR_DF.ds
self.AIR_TIME_SERIES_PD_DATETIME = pd.to_datetime(self.AIR_TIME_SERIES)
self.AIR_TIME_SERIES_UNIXTIME = self.AIR_TIME_SERIES_PD_DATETIME.apply(
lambda x: (x - datetime(1970, 1, 1)).total_seconds()
)
self.AIR_VALUE_SERIES = self.AIR_DF[VALUE_COL_NAME]
self.AIR_TIME_DATETIME_INDEX = pd.DatetimeIndex(self.AIR_TIME_SERIES)
class TimeSeriesDataInitTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataInitTest, self).setUp()
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_from_df = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
# Univariate TimeSeriesData initialized from a pd.DataFrame with time
# as a datetime.datetime object
self.ts_from_df_datetime = TimeSeriesData(
df=self.AIR_DF_DATETIME, time_col_name=TIME_COL_NAME
)
# Univariate TimeSeriesData initialized from a pd.DataFrame with time
# as unix time
self.ts_from_df_with_unix = TimeSeriesData(
df=self.AIR_DF_UNIXTIME,
use_unix_time=True,
unix_time_units="s",
time_col_name=TIME_COL_NAME,
)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_from_df_multi = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
# Multivariate TimeSeriesData initialized from a pd.DataFrame with time
# as a datetime.datetime object
self.ts_from_df_multi_datetime = TimeSeriesData(
df=self.MULTIVAR_AIR_DF_DATETIME, time_col_name=TIME_COL_NAME
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as a string
self.ts_from_series_univar_no_datetime = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as a pd.Timestamp
self.ts_from_series_univar_with_datetime = TimeSeriesData(
time=self.AIR_TIME_SERIES_PD_DATETIME, value=self.AIR_VALUE_SERIES
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as unix time
self.ts_from_series_with_unix = TimeSeriesData(
time=self.AIR_TIME_SERIES_UNIXTIME,
value=self.AIR_VALUE_SERIES,
use_unix_time=True,
unix_time_units="s",
time_col_name=TIME_COL_NAME,
)
# Univariate TimeSeriesData initialized with time as a pd.Series and
# value as a pd.DataFrame
self.ts_from_series_and_df_univar = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES.to_frame()
)
# Multivariate TimeSeriesData initialized from a pd.Series for time
# and DataFrame for value
self.ts_from_series_and_df_multivar = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF
)
# Univariate TimeSeriesData initialized with time as a pd.DateTimeIndex
# and value as a pd.Series
self.ts_from_index_and_series_univar = TimeSeriesData(
time=self.AIR_TIME_DATETIME_INDEX,
value=self.AIR_VALUE_SERIES,
time_col_name=TIME_COL_NAME,
)
# Multivariate TimeSeriesData initialized with time as a
# pd.DateTimeIndex and value as a pd.DataFrame
self.ts_from_index_and_series_multivar = TimeSeriesData(
time=self.AIR_TIME_DATETIME_INDEX,
value=self.MULTIVAR_VALUE_DF,
time_col_name=TIME_COL_NAME,
)
# TimeSeriesData initialized from None Objects
self.ts_df_none = TimeSeriesData(df=None)
self.ts_time_none_and_value_none = TimeSeriesData(time=None, value=None)
# TimeSeriesData initialized from Empty Objects
self.ts_df_empty = TimeSeriesData(df=EMPTY_DF)
self.ts_time_empty_value_empty = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_VALUE_SERIES
)
self.ts_time_empty_value_empty_no_name = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_VALUE_SERIES_NO_NAME
)
self.ts_time_empty_value_empty_df = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_DF
)
self.ts_time_empty_value_empty_df_with_cols = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_DF_WITH_COLS
)
# univariate data with missing time
self.ts_univariate_missing = TimeSeriesData(
df=pd.DataFrame(
{
"time": ["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-05"],
"value": [1, 2, 3, 4],
}
)
)
# multivariate data with missing time
self.ts_multi_missing = TimeSeriesData(
df=pd.DataFrame(
{
"time": ["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-05"],
"value1": [1, 2, 3, 4],
"value2": [4, 3, 2, 1],
}
)
)
# univariate data with unixtime in US/Pacific with time zone
self.unix_list = (
(
pd.date_range(
"2020-03-01", "2020-03-10", tz="US/Pacific", freq="1d"
).astype(int)
/ 1e9
)
.astype(int)
.to_list()
)
self.ts_univar_PST_tz = TimeSeriesData(
df=pd.DataFrame({"time": self.unix_list, "value": [0] * 10}),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
# univariate data with unixtime in US/Pacific without time zone
self.ts_univar_PST = TimeSeriesData(
df=pd.DataFrame({"time": self.unix_list, "value": [0] * 10}),
use_unix_time=True,
unix_time_units="s",
)
# univariate data with date str with tz
date = ["2020-10-31", "2020-11-01", "2020-11-02"]
self.ts_univar_str_date_tz = TimeSeriesData(
df=pd.DataFrame({"time": date, "value": [0] * 3}),
date_format="%Y-%m-%d",
tz="US/Pacific",
)
# univariate data with date str without tz
self.ts_univar_str_date = TimeSeriesData(
df=pd.DataFrame({"time": date, "value": [0] * 3}),
date_format="%Y-%m-%d",
)
# univariate data in US/Pacific Time Zone with missing data
self.ts_univar_PST_missing_tz = TimeSeriesData(
df=pd.DataFrame(
{"time": (self.unix_list[0:4] + self.unix_list[7:10]), "value": [0] * 7}
),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
# Testing univariate time series intialized from a DataFrame
def test_init_from_df_univar(self) -> None:
# DataFrame with string time
assert_series_equal(self.ts_from_df.time, self.AIR_TIME_SERIES_PD_DATETIME)
assert_series_equal(
cast(pd.Series, self.ts_from_df.value), self.AIR_VALUE_SERIES
)
# DataFrame with datetime time
assert_series_equal(
self.ts_from_df_datetime.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_df_datetime.value), self.AIR_VALUE_SERIES
)
# DataFrame with unix time
assert_series_equal(
self.ts_from_df_with_unix.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_df_with_unix.value), self.AIR_VALUE_SERIES
)
# Testing multivariate time series initialized from a DataFrame
def test_init_from_df_multi(self) -> None:
assert_series_equal(
self.ts_from_df_multi.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_df_multi.value), self.MULTIVAR_VALUE_DF
)
# Testing univariate time series initialized from a Series and Series/DataFrame
def test_init_from_series_univar(self) -> None:
# time and value from Series, with time as string
assert_series_equal(
self.ts_from_series_univar_no_datetime.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
# time and value from Series, with time as pd.Timestamp
assert_series_equal(
self.ts_from_series_univar_with_datetime.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
assert_series_equal(
cast(pd.Series, self.ts_from_series_univar_no_datetime.value),
self.AIR_VALUE_SERIES,
)
# time and value from Series, with time out of order and `sort_by_time=True`
unsorted_df = self.AIR_DF.sample(frac=1)
resorted_ts = TimeSeriesData(
time=unsorted_df.ds,
value=unsorted_df.y,
time_col_name=TIME_COL_NAME,
sort_by_time=True,
)
self.assertEqual(resorted_ts, self.ts_from_df)
# time and value from Series, with time as unix time
assert_series_equal(
self.ts_from_series_with_unix.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_series_with_unix.value), self.AIR_VALUE_SERIES
)
# time from Series and value from DataFrame
assert_series_equal(
self.ts_from_series_and_df_univar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
print(type(self.ts_from_series_and_df_univar.value))
assert_series_equal(
cast(pd.Series, self.ts_from_series_and_df_univar.value),
self.AIR_VALUE_SERIES,
)
# Testing multivariate time series initialized from a Series/DataFrame
def test_init_from_series_multivar(self) -> None:
# Testing multivariate time series initialized from a
assert_series_equal(
self.ts_from_series_and_df_multivar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_series_and_df_multivar.value),
self.MULTIVAR_VALUE_DF,
)
# Testing univariate time series with time initialized as a
# pd.DateTimeIndex
def test_init_from_index_univar(self) -> None:
assert_series_equal(
self.ts_from_index_and_series_univar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_index_and_series_univar.value),
self.AIR_VALUE_SERIES,
)
# Testing multivariate time series with time initialized as a
# pd.DateTimeIndex
def test_init_from_index_multivar(self) -> None:
assert_series_equal(
self.ts_from_index_and_series_multivar.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_index_and_series_multivar.value),
self.MULTIVAR_VALUE_DF,
)
# Testing initialization from None Objects
def test_none(self) -> None:
# Testing initialization from None DataFrame
assert_series_equal(self.ts_df_none.time, EMPTY_TIME_SERIES)
assert_series_equal(cast(pd.Series, self.ts_df_none.value), EMPTY_VALUE_SERIES)
# Testing initialization from two None Series
assert_series_equal(self.ts_time_none_and_value_none.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_none_and_value_none.value), EMPTY_VALUE_SERIES
)
# Testing initialization from Empty Objects
def test_empty(self) -> None:
# Testing intialization from empty DataFrame
assert_series_equal(self.ts_df_empty.time, EMPTY_TIME_SERIES)
assert_series_equal(cast(pd.Series, self.ts_df_empty.value), EMPTY_VALUE_SERIES)
# Testing intialization from two empty Series
assert_series_equal(self.ts_time_empty_value_empty.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty.value), EMPTY_VALUE_SERIES
)
# Testing intialization from two empty no name Series
assert_series_equal(
self.ts_time_empty_value_empty_no_name.time, EMPTY_TIME_SERIES
)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_no_name.value),
EMPTY_VALUE_SERIES,
)
# Make sure the time and value objects here have the default names
self.assertEqual(
self.ts_time_empty_value_empty_no_name.time.name, DEFAULT_TIME_NAME
)
self.assertEqual(
self.ts_time_empty_value_empty_no_name.value.name, DEFAULT_VALUE_NAME
)
# Testing initialization from time as empty Series and value as empty
# DataFrame
assert_series_equal(self.ts_time_empty_value_empty_df.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_df.value), EMPTY_VALUE_SERIES
)
# Testing initialization from time as empty Series and value as empty
# DataFrame
assert_series_equal(
self.ts_time_empty_value_empty_df_with_cols.time, EMPTY_TIME_SERIES
)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_df_with_cols.value),
EMPTY_VALUE_SERIES,
)
# Testing incorrect initializations
def test_incorrect_init_types(self) -> None:
# Incorrect initialization with DF
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Optional[pd.core.frame.DataFrame]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(df=[])
# Incorrect initialization with value
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=None)
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None, pd.core.frame.DataFrame,
# pd.core.series.Series]` for 2nd param but got `List[Variable[_T]]`.
TimeSeriesData(time=self.AIR_TIME_SERIES, value=[])
# Incorrect initialization with time
with self.assertRaises(ValueError):
TimeSeriesData(time=None, value=self.AIR_VALUE_SERIES)
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None,
# pd.core.indexes.datetimes.DatetimeIndex, pd.core.series.Series]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(time=[], value=self.AIR_VALUE_SERIES)
# Incorrect initialization with time and value
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None,
# pd.core.indexes.datetimes.DatetimeIndex, pd.core.series.Series]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(time=[], value=[])
# Incorrect initialization with value dtypes
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES.map(str))
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF.applymap(str))
# Testing incorrect initializations
def test_incorrect_init_lengths(self) -> None:
# Incorrect initialization with different length time and values
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES[:-1])
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES[:-1], value=self.AIR_VALUE_SERIES)
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF[:-1])
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES[:-1], value=self.MULTIVAR_VALUE_DF)
# Testing DataFrame conversion
def test_to_dataframe(self) -> None:
# Univariate case
assert_frame_equal(self.ts_from_df.to_dataframe(), self.AIR_DF_DATETIME)
# Multivariate case
assert_frame_equal(
self.ts_from_df_multi_datetime.to_dataframe(), self.MULTIVAR_AIR_DF_DATETIME
)
# Series Cases
assert_frame_equal(
self.ts_from_series_univar_no_datetime.to_dataframe(), self.AIR_DF_DATETIME
)
assert_frame_equal(
self.ts_from_series_univar_with_datetime.to_dataframe(),
self.AIR_DF_DATETIME,
)
# Series/DataFrame Cases
assert_frame_equal(
self.ts_from_series_and_df_univar.to_dataframe(), self.AIR_DF_DATETIME
)
assert_frame_equal(
self.ts_from_series_and_df_multivar.to_dataframe(),
self.MULTIVAR_AIR_DF_DATETIME,
)
# Empty/None Cases
assert_frame_equal(self.ts_df_none.to_dataframe(), EMPTY_DF_WITH_COLS)
assert_frame_equal(
self.ts_time_none_and_value_none.to_dataframe(), EMPTY_DF_WITH_COLS
)
assert_frame_equal(self.ts_df_empty.to_dataframe(), EMPTY_DF_WITH_COLS)
assert_frame_equal(
self.ts_time_empty_value_empty.to_dataframe(), EMPTY_DF_WITH_COLS
)
assert_frame_equal(
self.ts_time_empty_value_empty_df.to_dataframe(), EMPTY_DF_WITH_COLS
)
# Testing Data Interpolate
def test_interpolate(self) -> None:
# univariate
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3.5, 4],
}
)
),
)
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="ffill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3, 4],
}
)
),
)
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="bfill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 4, 4],
}
)
),
)
# multivariate
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3.5, 4],
"value2": [4, 3, 2, 1.5, 1],
}
)
),
)
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="ffill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3, 4],
"value2": [4, 3, 2, 2, 1],
}
)
),
)
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="bfill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 4, 4],
"value2": [4, 3, 2, 1, 1],
}
)
),
)
# test with no frequency given univariate
self.assertEqual(
self.ts_univariate_missing.interpolate(method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3.5, 4],
}
)
),
)
# no frequency given, for multivariate
self.assertEqual(
self.ts_multi_missing.interpolate(method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3.5, 4],
"value2": [4, 3, 2, 1.5, 1],
}
)
),
)
def test_to_array(self) -> None:
# Univariate case
np.testing.assert_array_equal(
self.ts_from_df.to_array(), self.AIR_DF_DATETIME.to_numpy()
)
# Multivariate case
np.testing.assert_array_equal(
self.ts_from_df_multi_datetime.to_array(),
self.MULTIVAR_AIR_DF_DATETIME.to_numpy(),
)
# Series Cases
np.testing.assert_array_equal(
self.ts_from_series_univar_no_datetime.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
np.testing.assert_array_equal(
self.ts_from_series_univar_with_datetime.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
# Series/DataFrame Cases
np.testing.assert_array_equal(
self.ts_from_series_and_df_univar.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
np.testing.assert_array_equal(
self.ts_from_series_and_df_multivar.to_array(),
self.MULTIVAR_AIR_DF_DATETIME.to_numpy(),
)
# Empty/None Cases
np.testing.assert_array_equal(self.ts_df_none.to_array(), np.empty)
np.testing.assert_array_equal(
self.ts_time_none_and_value_none.to_array(), np.empty
)
np.testing.assert_array_equal(self.ts_df_empty.to_array(), np.empty)
np.testing.assert_array_equal(
self.ts_time_empty_value_empty.to_array(), np.empty
)
np.testing.assert_array_equal(
self.ts_time_empty_value_empty_df.to_array(), np.empty
)
def test_tz(self) -> None:
self.ts_univar_PST_tz.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(self.ts_univar_PST_tz.freq_to_timedelta(), pd.Timedelta("1d"))
self.assertEqual(self.ts_univar_PST_tz.tz(), pytz.timezone("US/Pacific"))
self.assertTrue(
(
np.array(self.unix_list)
== (self.ts_univar_PST_tz.time.values.astype(int) / 1e9).astype(int)
).all()
)
with self.assertRaisesRegex(
ValueError, "Only constant frequency is supported for time!"
):
self.ts_univar_PST.validate_data(
validate_frequency=True, validate_dimension=True
)
self.ts_univar_str_date.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(
self.ts_univar_str_date.freq_to_timedelta(), pd.Timedelta("1d")
)
self.ts_univar_str_date_tz.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(
self.ts_univar_str_date_tz.freq_to_timedelta(), pd.Timedelta("1d")
)
self.assertEqual(self.ts_univar_PST_tz.tz(), pytz.timezone("US/Pacific"))
# test ambiguous
tsd = TimeSeriesData(
df=pd.DataFrame(
{
"time": [
"2018-10-28 01:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 03:00:00",
"2018-10-28 03:30:00",
],
"value": [0] * 7,
}
),
tz="CET",
tz_ambiguous="infer",
)
tsd.validate_data(validate_frequency=True, validate_dimension=True)
# test nonexistent
tsd = TimeSeriesData(
df=pd.DataFrame(
{
"time": [
"2020-03-08 02:00:00",
"2020-03-08 02:30:00",
"2020-03-08 03:00:00",
],
"value": [0] * 3,
}
),
tz="US/Pacific",
tz_nonexistent="shift_forward",
)
def test_infer_freq_robust(self) -> None:
self.assertEqual(
self.ts_univariate_missing.infer_freq_robust(),
pd.Timedelta(value=1, unit="D"),
)
self.assertEqual(
self.ts_univar_PST_missing_tz.infer_freq_robust(),
pd.Timedelta(value=1, unit="D"),
)
def test_is_data_missing(self) -> None:
self.assertEqual(self.ts_univariate_missing.is_data_missing(), True)
self.assertEqual(self.ts_univar_PST_missing_tz.is_data_missing(), True)
self.assertEqual(self.ts_from_series_and_df_univar.is_data_missing(), False)
self.assertEqual(self.ts_from_series_and_df_multivar.is_data_missing(), False)
def test_min_max_values(self) -> None:
# test min/max value for univariate
self.assertEqual(self.ts_from_df.min, np.nanmin(self.ts_from_df.value.values))
self.assertEqual(self.ts_from_df.max, np.nanmax(self.ts_from_df.value.values))
# test min/max value for multivariate
self.assertEqual(
# pyre-fixme[16]: `float` has no attribute `equals`.
self.ts_from_df_multi.min.equals(
self.ts_from_df_multi.value.min(skipna=True)
),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
self.ts_from_df_multi.max.equals(
self.ts_from_df_multi.value.max(skipna=True)
),
True,
)
# test min/max value for empty TS
empty_ts = TimeSeriesData(pd.DataFrame())
self.assertEqual(np.isnan(empty_ts.min), True)
self.assertEqual(np.isnan(empty_ts.max), True)
# test if min/max changes if values are re-assigned for univariate
ts_from_df_new = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
new_val = np.random.randn(len(self.AIR_DF))
ts_from_df_new.value = pd.Series(new_val)
self.assertEqual(ts_from_df_new.min, np.min(new_val))
self.assertEqual(ts_from_df_new.max, np.max(new_val))
# test if min/max changes if values are re-assigned with NaNs for univariate
new_val[-1] = np.nan
ts_from_df_new.value = pd.Series(new_val)
self.assertEqual(ts_from_df_new.min, np.nanmin(new_val))
self.assertEqual(ts_from_df_new.max, np.nanmax(new_val))
# test min/max changes if values are re-assigned for multivariate
ts_from_df_multi_new = TimeSeriesData(
self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
new_val_multi = np.random.randn(
self.MULTIVAR_VALUE_DF.shape[0], self.MULTIVAR_VALUE_DF.shape[1] - 1
)
ts_from_df_multi_new.value = pd.DataFrame(new_val_multi)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.min.equals(pd.DataFrame(new_val_multi).min()),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.max.equals(pd.DataFrame(new_val_multi).max()),
True,
)
# test min/max changes if values are re-assigned with NaNs for multivariate
new_val_multi[0] = np.nan
ts_from_df_multi_new.value = pd.DataFrame(new_val_multi)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.min.equals(
pd.DataFrame(new_val_multi).min(skipna=True)
),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.max.equals(
pd.DataFrame(new_val_multi).max(skipna=True)
),
True,
)
class TimeSeriesDataOpsTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataOpsTest, self).setUp()
# Creating DataFrames
# DataFrame with date offset
transformed_df_date = self.AIR_DF_DATETIME.copy(deep=True)
transformed_df_date.ds = transformed_df_date.ds.apply(
lambda x: x + relativedelta(years=NUM_YEARS_OFFSET)
)
transformed_df_date_concat = self.AIR_DF.append(
transformed_df_date, ignore_index=True
)
transformed_df_date_double = self.AIR_DF_DATETIME.copy(deep=True)
transformed_df_date_double.ds = transformed_df_date.ds.apply(
lambda x: x + relativedelta(years=NUM_YEARS_OFFSET * 2)
)
transformed_df_date_concat_double = self.AIR_DF.append(
transformed_df_date_double, ignore_index=True
)
# DataFrames with value offset
transformed_df_value = self.AIR_DF.copy(deep=True)
transformed_df_value.y = transformed_df_value.y.apply(lambda x: x * 2)
transformed_df_value_inv = self.AIR_DF.copy(deep=True)
transformed_df_value_inv.y = transformed_df_value_inv.y.apply(lambda x: x * -1)
# DataFrame with date and value offset
transformed_df_date_and_value = transformed_df_date.copy(deep=True)
transformed_df_date_and_value.y = transformed_df_date_and_value.y.apply(
lambda x: x * 2
)
# DataFrame with date offset (multivariate)
transformed_df_date_multi = transformed_df_date.copy(deep=True)
transformed_df_date_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_multi.y * 2
)
transformed_df_date_concat_multi = self.MULTIVAR_AIR_DF.append(
transformed_df_date_multi, ignore_index=True
)
transformed_df_date_concat_mixed = self.MULTIVAR_AIR_DF_DATETIME.append(
transformed_df_date
)
transformed_df_date_double_multi = transformed_df_date_double.copy(deep=True)
transformed_df_date_double_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_double_multi.y * 2
)
transformed_df_date_concat_double_multi = self.MULTIVAR_AIR_DF.append(
transformed_df_date_double_multi, ignore_index=True
)
transformed_df_date_concat_double_mixed = self.MULTIVAR_AIR_DF_DATETIME.append(
transformed_df_date_double
)
# DataFrame with value offset (multivariate)
transformed_df_value_none_multi = self.MULTIVAR_AIR_DF.copy(deep=True)
transformed_df_value_none_multi.y = transformed_df_value_none_multi.y_1
transformed_df_value_none_multi.y_1 = np.nan
# DataFrame with date and value offset (multivariate)
transformed_df_date_and_value_multi = transformed_df_date_and_value.copy(
deep=True
)
transformed_df_date_and_value_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_and_value_multi.y * 2
)
# DataFrame with all constant values
df_zeros = self.AIR_DF.copy(deep=True)
df_zeros.y.values[:] = 0
df_ones = self.AIR_DF.copy(deep=True)
df_ones.y.values[:] = 1
df_twos = df_ones.copy(deep=True)
df_twos.y.values[:] = 2
df_neg_ones = self.AIR_DF.copy(deep=True)
df_neg_ones.y.values[:] = -1
df_ones_multi = df_ones.copy(deep=True)
df_ones_multi[VALUE_COL_NAME + "_1"] = df_ones_multi.y * 2
# Creating TimeSeriesData objects
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_univ_1 = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
self.ts_univ_2 = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
self.ts_univ_default_names = TimeSeriesData(df=self.AIR_DF_WITH_DEFAULT_NAMES)
self.ts_univ_default_names_2 = TimeSeriesData(df=self.AIR_DF_WITH_DEFAULT_NAMES)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_multi_1 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date offset
self.ts_date_transform_univ = TimeSeriesData(
df=transformed_df_date, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_univ = TimeSeriesData(
df=transformed_df_date_concat, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_double_univ = TimeSeriesData(
df=transformed_df_date_double, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_univ = TimeSeriesData(
df=transformed_df_date_concat_double, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date offset (multivariate)
self.ts_date_transform_multi = TimeSeriesData(
df=transformed_df_date_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_multi = TimeSeriesData(
df=transformed_df_date_concat_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_mixed = TimeSeriesData(
df=transformed_df_date_concat_mixed, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_double_multi = TimeSeriesData(
df=transformed_df_date_double_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_multi = TimeSeriesData(
df=transformed_df_date_concat_double_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_mixed = TimeSeriesData(
df=transformed_df_date_concat_double_mixed, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with value offset
self.ts_value_transform_univ = TimeSeriesData(
df=transformed_df_value, time_col_name=TIME_COL_NAME
)
self.ts_value_transform_inv_univ = TimeSeriesData(
df=transformed_df_value_inv, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with value offset (multivariate)
self.ts_value_transform_none_multi = TimeSeriesData(
df=transformed_df_value_none_multi, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date and value offset
self.ts_date_and_value_transform_univ = TimeSeriesData(
df=transformed_df_date_and_value, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date and value offset (multivariate)
self.ts_date_and_value_transform_multi = TimeSeriesData(
df=transformed_df_date_and_value_multi, time_col_name=TIME_COL_NAME
)
# TimeSeriesData object with all constant values
self.ts_zero = TimeSeriesData(df=df_zeros, time_col_name=TIME_COL_NAME)
self.ts_ones = TimeSeriesData(df=df_ones, time_col_name=TIME_COL_NAME)
self.ts_twos = TimeSeriesData(df=df_twos, time_col_name=TIME_COL_NAME)
self.ts_neg_ones = TimeSeriesData(df=df_neg_ones, time_col_name=TIME_COL_NAME)
self.ts_ones_multi = TimeSeriesData(
df=df_ones_multi, time_col_name=TIME_COL_NAME
)
# Empty TimeSeriesData Object
self.ts_empty = TimeSeriesData(df=EMPTY_DF)
self.ts_empty_with_cols = TimeSeriesData(
df=EMPTY_DF_WITH_COLS, time_col_name=TIME_COL_NAME
)
# Copies for Extended objects
self.ts_univ_extend = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_univ_extend_2 = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_univ_extend_err = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_3 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_4 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_err = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_err_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_empty_extend = TimeSeriesData(df=EMPTY_DF)
self.ts_empty_extend_err = TimeSeriesData(df=EMPTY_DF)
# Other values
self.length = len(self.AIR_DF)
def test_eq(self) -> None:
# Univariate equality
self.assertTrue(self.ts_univ_1 == self.ts_univ_2)
# Multivariate equality
self.assertTrue(self.ts_multi_1 == self.ts_multi_2)
# Univariate inequality
self.assertFalse(self.ts_univ_1 == self.ts_date_transform_univ)
self.assertFalse(self.ts_univ_1 == self.ts_value_transform_univ)
self.assertFalse(self.ts_univ_1 == self.ts_date_and_value_transform_univ)
# Multivariate inequality
self.assertFalse(self.ts_multi_1 == self.ts_date_transform_multi)
self.assertFalse(self.ts_multi_1 == self.ts_value_transform_none_multi)
self.assertFalse(self.ts_multi_1 == self.ts_date_and_value_transform_multi)
# Univariate vs. Multivariate inequality
self.assertFalse(self.ts_univ_1 == self.ts_multi_1)
self.assertFalse(self.ts_multi_1 == self.ts_univ_1)
def test_ne(self) -> None:
# Univariate equality
self.assertFalse(self.ts_univ_1 != self.ts_univ_2)
# Multivariate equality
self.assertFalse(self.ts_multi_1 != self.ts_multi_2)
# Univariate inequality
self.assertTrue(self.ts_univ_1 != self.ts_date_transform_univ)
self.assertTrue(self.ts_univ_1 != self.ts_value_transform_univ)
self.assertTrue(self.ts_univ_1 != self.ts_date_and_value_transform_univ)
# Multivariate inequality
self.assertTrue(self.ts_multi_1 != self.ts_date_transform_multi)
self.assertTrue(self.ts_multi_1 != self.ts_value_transform_none_multi)
self.assertTrue(self.ts_multi_1 != self.ts_date_and_value_transform_multi)
# Univariate vs. Multivariate inequality
self.assertTrue(self.ts_univ_1 != self.ts_multi_1)
self.assertTrue(self.ts_multi_1 != self.ts_univ_1)
def test_add(self) -> None:
# Add same DataFrames
self.assertEqual(self.ts_univ_1 + self.ts_univ_2, self.ts_value_transform_univ)
# Add different DataFrames
self.assertEqual(
self.ts_univ_1 + self.ts_value_transform_inv_univ, self.ts_zero
)
# Add Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_univ_1 + self.ts_multi_1, self.ts_value_transform_none_multi
)
# Empty Case
self.assertEqual(self.ts_empty + self.ts_empty, self.ts_empty)
# Add DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 + self.ts_date_transform_univ
def test_sub(self) -> None:
# Subtract same DataFrames
self.assertEqual(self.ts_univ_1 - self.ts_univ_2, self.ts_zero)
# Subtract different DataFrames
self.assertEqual(
self.ts_univ_1 - self.ts_value_transform_inv_univ,
self.ts_value_transform_univ,
)
# Subtract Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_multi_1 - self.ts_value_transform_inv_univ,
self.ts_value_transform_none_multi,
)
# Empty Case
self.assertEqual(self.ts_empty - self.ts_empty, self.ts_empty)
# Subtract DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 - self.ts_date_transform_univ
def test_div(self) -> None:
# Divide same DataFrames
self.assertEqual(self.ts_univ_1 / self.ts_univ_2, self.ts_ones)
# Divide different DataFrames
self.assertEqual(
self.ts_univ_1 / self.ts_value_transform_inv_univ, self.ts_neg_ones
)
# Divide Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_value_transform_univ / self.ts_ones_multi,
self.ts_value_transform_none_multi,
)
# Empty Case
self.assertEqual(self.ts_empty / self.ts_empty, self.ts_empty)
# Divide DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 / self.ts_date_transform_univ
def test_mul(self) -> None:
# Multiply same DataFrames
self.assertEqual(self.ts_ones * self.ts_ones, self.ts_ones)
# Multiply different DataFrames
self.assertEqual(self.ts_univ_1 * self.ts_twos, self.ts_value_transform_univ)
# Multiply Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_multi_1 * self.ts_twos, self.ts_value_transform_none_multi
)
# Empty Case
self.assertEqual(self.ts_empty * self.ts_empty, self.ts_empty)
# Multiply DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 * self.ts_date_transform_univ
def test_len(self) -> None:
# Normal case
self.assertEqual(len(self.ts_univ_1), self.length)
# Empty case
self.assertEqual(len(self.ts_empty), 0)
def test_empty(self) -> None:
# Empty case
self.assertTrue(self.ts_empty.is_empty())
# Not empty case
self.assertFalse(self.ts_univ_1.is_empty())
def test_extend(self) -> None:
# Testing cases with validate=True
# Univariate case
self.ts_univ_extend.extend(self.ts_date_transform_univ)
self.assertEqual(self.ts_univ_extend, self.ts_date_transform_concat_univ)
# Multivariate case
self.ts_multi_extend.extend(self.ts_date_transform_multi)
self.assertEqual(self.ts_multi_extend, self.ts_date_transform_concat_multi)
# Univariate and multivariate case
self.ts_multi_extend_2.extend(self.ts_date_transform_univ)
self.assertEqual(self.ts_multi_extend_2, self.ts_date_transform_concat_mixed)
# Empty case
self.ts_univ_default_names.extend(self.ts_empty)
self.assertEqual(self.ts_univ_default_names, self.ts_univ_default_names_2)
# Catching errors
with self.assertRaises(ValueError):
self.ts_univ_extend_err.extend(self.ts_date_transform_double_univ)
# Multivariate case
self.ts_multi_extend_err.extend(self.ts_date_transform_double_multi)
# Univariate and multivariate case
self.ts_multi_extend_err_2.extend(self.ts_date_transform_double_univ)
# Empty case
self.ts_empty_extend_err.extend(self.ts_empty)
# Testing cases with validate=False
# Univariate case
self.ts_univ_extend_2.extend(self.ts_date_transform_double_univ, validate=False)
self.assertEqual(
self.ts_univ_extend_2, self.ts_date_transform_concat_double_univ
)
# Multivariate case
self.ts_multi_extend_3.extend(
self.ts_date_transform_double_multi, validate=False
)
self.assertEqual(
self.ts_multi_extend_3, self.ts_date_transform_concat_double_multi
)
# Univariate and multivariate case
self.ts_multi_extend_4.extend(
self.ts_date_transform_double_univ, validate=False
)
self.assertEqual(
self.ts_multi_extend_4, self.ts_date_transform_concat_double_mixed
)
# Empty case
self.ts_empty_extend.extend(self.ts_empty, validate=False)
self.assertEqual(self.ts_empty_extend, self.ts_empty)
def test_get_item(self) -> None:
# Univariate test case
self.assertEqual(
self.ts_date_transform_concat_univ[: len(self.ts_univ_1)], self.ts_univ_1
)
# Multivariate test case
self.assertEqual(
self.ts_date_transform_concat_multi[: len(self.ts_multi_1)], self.ts_multi_1
)
# Multivariate test case where we select a specific column
for col in self.ts_date_transform_concat_multi.value.columns:
ts_univ = TimeSeriesData(
time=self.ts_date_transform_concat_multi.time,
value=self.ts_date_transform_concat_multi.value[col],
time_col_name=self.ts_date_transform_concat_multi.time_col_name,
)
self.assertEqual(self.ts_date_transform_concat_multi[col], ts_univ)
# Multivariate test case where we select multiple columns
self.assertEqual(
self.ts_date_transform_concat_multi[MULTIVAR_VALUE_DF_COLS],
self.ts_date_transform_concat_multi,
)
# Full/Empty cases
self.assertEqual(self.ts_univ_1[:], self.ts_univ_1)
self.assertEqual(
self.ts_univ_1[0:0],
TimeSeriesData(
time=pd.Series(name=TIME_COL_NAME),
value=pd.Series(name=VALUE_COL_NAME),
time_col_name=TIME_COL_NAME,
),
)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot(self) -> plt.Figure:
# Univariate test case
ax = self.ts_univ_1.plot(cols=["y"])
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_multivariate(self) -> plt.Figure:
# Multivariate test case
ax = self.ts_multi_1.plot()
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_params(self) -> plt.Figure:
# Test more parameter overrides.
ax = self.ts_multi_1.plot(
figsize=(8, 3), plot_kwargs={"cmap": "Purples"}, grid=False
)
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_grid_ax(self) -> plt.Figure:
# Test grid and ax parameter overrides.
fig, ax = plt.subplots(figsize=(6, 4))
ax = self.ts_univ_1.plot(ax=ax, grid_kwargs={"lw": 2, "ls": ":"})
self.assertIsNotNone(ax)
return fig
def test_plot_missing_column(self) -> None:
# Columns not in data.
with self.assertRaises(ValueError):
self.ts_univ_1.plot(cols=["z"])
def test_plot_empty(self) -> None:
# No data to plot.
with self.assertRaises(ValueError):
self.ts_empty.plot()
class TimeSeriesDataMiscTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataMiscTest, self).setUp()
# Creating TimeSeriesData objects
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_univ = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_multi = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
def test_is_univariate(self) -> None:
# Univariate case
self.assertTrue(self.ts_univ.is_univariate())
# Multivariate case
self.assertFalse(self.ts_multi.is_univariate())
def test_time_to_index(self) -> None:
# Univariate case
assert_index_equal(self.ts_univ.time_to_index(), self.AIR_TIME_DATETIME_INDEX)
# Multivariate case
assert_index_equal(self.ts_multi.time_to_index(), self.AIR_TIME_DATETIME_INDEX)
def test_repr(self) -> None:
# Univariate case
self.assertEqual(self.ts_univ.__repr__(), self.AIR_DF_DATETIME.__repr__())
# Multivariate case
self.assertEqual(
self.ts_multi.__repr__(), self.MULTIVAR_AIR_DF_DATETIME.__repr__()
)
def test_repr_html(self) -> None:
# Univariate case
self.assertEqual(self.ts_univ._repr_html_(), self.AIR_DF_DATETIME._repr_html_())
# Multivariate case
self.assertEqual(
self.ts_multi._repr_html_(), self.MULTIVAR_AIR_DF_DATETIME._repr_html_()
)
class TSIteratorTest(TestCase):
def test_ts_iterator_univariate_next(self) -> None:
df = pd.DataFrame(
[["2020-03-01", 100], ["2020-03-02", 120], ["2020-03-03", 130]],
columns=["time", "y"],
)
kats_data = TimeSeriesData(df=df)
kats_iterator = TSIterator(kats_data)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-01")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([100]), check_names=False
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-02")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([120]), check_names=False
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-03")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([130]), check_names=False
)
def test_ts_iterator_multivariate_next(self) -> None:
df = pd.DataFrame(
[
["2020-03-01", 100, 200],
["2020-03-02", 120, 220],
["2020-03-03", 130, 230],
],
columns=["time", "y1", "y2"],
)
kats_data = TimeSeriesData(df=df)
kats_iterator = TSIterator(kats_data)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-01")]), check_names=False
)
assert_frame_equal(
cast(pd.DataFrame, val.value),
pd.DataFrame([[100, 200]], columns=["y1", "y2"]),
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-02")]), check_names=False
)
assert_frame_equal(
cast(pd.DataFrame, val.value),
pd.DataFrame([[120, 220]], columns=["y1", "y2"]),
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-03")]), check_names=False
)
assert_frame_equal(
cast(pd.DataFrame, val.value),
pd.DataFrame([[130, 230]], columns=["y1", "y2"]),
)
def test_ts_iterator_comprehension(self) -> None:
kats_data = TimeSeriesData(
time=pd.to_datetime(
np.array([1596225347, 1596225348, 1596225349]), unit="s", utc=True
),
value=pd.Series(np.array([1, 2, 4])),
)
kats_iterator = TSIterator(kats_data)
kats_list = list(kats_iterator)
val = kats_list[0]
assert_series_equal(
val.time,
pd.Series([pd.Timestamp("2020-07-31 19:55:47+0000", tz="UTC")]),
check_names=False,
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([1]), check_names=False
)
val = kats_list[1]
assert_series_equal(
val.time,
pd.Series([pd.Timestamp("2020-07-31 19:55:48+0000", tz="UTC")]),
check_names=False,
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([2]), check_names=False
)
val = kats_list[2]
assert_series_equal(
val.time,
pd.Series([pd.Timestamp("2020-07-31 19:55:49+0000", tz="UTC")]),
check_names=False,
)
assert_series_equal(
            cast(pd.Series, val.value), pd.Series([4]), check_names=False
        )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@created: 11.11.19
@author: felix
"""
from typing import Optional
from collections import Counter
from calendar import monthrange
import datetime
import pandas as pd
class Weekday:
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
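# Weekday mirrors datetime.date.weekday() numbering (Monday == 0 ... Sunday == 6), so
# callers can pass, e.g., weekday=Weekday.THURSDAY.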
def meetup_date(year: int, month: int, *,
nth: Optional[int] = None,
                weekday: Optional[int] = None) -> datetime.date:
thursday = weekday if weekday is not None else 3
dates = [datetime.date(year=year, month=month, day=i) for i in range(1, monthrange(year, month)[1] + 1)]
df = pd.DataFrame(data=dates, columns=['date', ])
    df['date'] = pd.to_datetime(df['date'])
import funcy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
from dateutil import parser
from tqdm import tqdm
from utils.helpers import *
from utils.plot import plot_joint_distribution
font = {
"size": 30
}
matplotlib.rc("font", **font)
pd.options.mode.chained_assignment = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MOST_RECENT_FILE = sorted(os.listdir(os.path.join(BASE_DIR, "data", "REDCap")))[-1]
REDCAP_FPATH = os.path.join(BASE_DIR, "data", "REDCap", MOST_RECENT_FILE)
SERIES_ID_FPATH = os.path.join(BASE_DIR, "data", "match_redcap_plataforma.csv")
SEGMENTATION_FPATH = os.path.join(BASE_DIR, "data", "inference_df.csv")
get_date_regex = r"ProjetoCOVIDAI_DATA_(?P<data>.*)_\d+.csv"
date_str = re.match(get_date_regex, MOST_RECENT_FILE).group("data")
dataset_date = parser.parse(date_str)
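# e.g. a hypothetical export named "ProjetoCOVIDAI_DATA_2020-07-15_1234.csv" yields
# date_str == "2020-07-15", which dateutil parses into dataset_date.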
# Normalize name and CPF
df = pd.read_csv(REDCAP_FPATH)
df.nome = df.nome.apply(lambda s: to_normalized_string(s) if pd.notna(s) else s)
df.cpf = df.cpf.apply(lambda v: str(int(v)) if pd.notna(v) else v)
# Fill redcap_repeat_instrument missing data with "dados_pessoais_unico" since these
# rows are not filled automatically by the database
df.redcap_repeat_instrument = df.redcap_repeat_instrument.fillna("dados_pessoais_unico")
# Fill missing hospitalization dates with the date of admission to the ICU, when available
df.data_admissao_hospitalar = df.data_admissao_hospitalar.fillna(df.data_admissao_uti)
# Calculate length of stay based on hospitalization date and date of discharge or
# date of death
fill_length_of_stay = df.apply(
lambda row: calculate_length_of_stay(
row["data_admissao_hospitalar"],
row["data_alta_hospitalar"],
row["data_obito"]
),
axis=1
)
df.tempo_estadia_hospitalar = df.tempo_estadia_hospitalar.fillna(fill_length_of_stay)
# Calculate the date of discharge from ICU based on the date of admission
# in the ICU and length of stay in the ICU.
df["data_alta_uti"] = df.apply(
lambda row: sum_date_with_interval(
row["data_admissao_uti"],
row["tempo_estadia_uti"]
),
axis=1
)
# Calculate the date of removal of the ventilation based on the date of ventilation
# and the length of ventilation
df["data_remocao_ventilacao"] = df.apply(
lambda row: sum_date_with_interval(
row["data_ventilacao"],
row["tempo_ventilacao_mecanica"]
),
axis=1
)
# Calculate age and body mass index
df["idade"] = df.apply(
lambda row: calculate_age(
row["data_nasc"],
row["data_admissao_hospitalar"],
dataset_date
),
axis=1
)
df["imc"] = df.peso / (df.altura ** 2)
# Some of the rows have the platelet count in a different unit and need to be
# multiplied by 1000
df.plaquetas = df.plaquetas.apply(lambda v: v * 1000 if v < 1000 else v)
############################## Finished processing the ordinary data ##############################
# Here we define variables useful for processing the rest of the data
cols_intermediate_outcomes = [
"data_sepse",
"sepse",
"data_sdra",
"sdra",
"data_falencia_cardiaca",
"falencia_cardiaca",
"data_choque_septico",
"choque_septico",
"data_coagulopatia",
"coagulopatia",
"data_iam",
"iam",
"data_ira",
"ira"
]
cols_personal_data = [
"nome",
"cpf",
"instituicao",
"data_nasc",
"idade",
"sexo",
"altura",
"peso",
"imc",
"alta",
"obito",
"data_admissao_hospitalar",
"data_admissao_uti",
"data_obito",
"data_alta_hospitalar",
"data_alta_uti",
"data_ventilacao",
"data_remocao_ventilacao",
"tempo_estadia_hospitalar",
"tempo_estadia_uti",
"tempo_ventilacao_mecanica"
] + cols_intermediate_outcomes
cols_comorbidities = [
"has",
"ieca_bra",
"dm",
"asma",
"tabagista",
"dpoc",
"cardiopatia",
"irc",
"neoplasia",
"aids",
"neutropenia"
]
cols_respiratory_comorbidities = [
"asma", "tabagista", "dpoc"
]
cols_cardiac_comorbidities = [
"has", "cardiopatia"
]
cols_dates = [
col for col in df.columns
if "data" in col and col not in
cols_personal_data + ["redcap_data_access_group"]
]
identity_map = {
0: 0,
1: 1
}
irc_map = {
1: "negativo",
2: "nao_dialitico",
3: "dialitico"
}
neoplasia_map = {
1: "negativo",
2: "primaria_ou_secundaria",
3: "outras"
}
map_comorbidities = {
"irc": irc_map,
"neoplasia": neoplasia_map
}
# Now we build a separate dataframe for saving personal data.
df_personal_data = df[df.redcap_repeat_instrument == "dados_pessoais_unico"]
# Discriminate patients that were admitted to the hospital and to the ICU. Also, discriminate those that
# were discharged and those who died.
df_personal_data["internacao"] = df_personal_data.data_admissao_hospitalar.notna()
df_personal_data["uti"] = df_personal_data.data_admissao_uti.notna()
df_personal_data["obito"] = df_personal_data.data_obito.notna()
df_personal_data["alta"] = df_personal_data.data_alta_hospitalar.notna()
df_personal_data = df_personal_data[
["record_id"] + cols_personal_data + cols_comorbidities
]
for col in cols_comorbidities:
df_personal_data[col] = df_personal_data[col].map(map_comorbidities.get(col, identity_map))
# Count the number of previous comorbidities each patient has.
df_personal_data["n_comorbidades"] = df_personal_data[cols_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_respiratorias"] = df_personal_data[cols_respiratory_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_cardiacas"] = df_personal_data[cols_cardiac_comorbidities].apply(count_comorbidities, axis=1)
############################## Finished processing the personal data ##############################
# Now we build separate dataframes for saving clinical, treatment, laboratorial, image and confirmatory data.
# Clinical dataframe
cols_clinical = [
"data_dispneia",
"dispneia",
"data_sofa",
"sofa_score",
"data_saturacao_o2",
"saturacao_o2",
"data_saps_3",
"saps_3"
]
df_clinical = df[df.redcap_repeat_instrument == "evolucao_clinica_multiplo"]
df_clinical = df_clinical[["record_id"] + cols_clinical]
# We need separate dataframes for each date. Note that the clinical dataframe has four dates. We will separate
# the columns accordingly.
df_dispneia = df_clinical[[
"record_id",
"data_dispneia",
"dispneia"
]]
df_sofa = df_clinical[[
"record_id",
"data_sofa",
"sofa_score"
]]
df_saturacao_o2 = df_clinical[[
"record_id",
"data_saturacao_o2",
"saturacao_o2"
]]
df_saps_3 = df_clinical[[
"record_id",
"data_saps_3",
"saps_3"
]]
# Treatment dataframe
cols_treatment = [
"data_ventilacao",
"ventilacao",
"pao2_fio2",
"data_pronacao",
"pronacao",
"data_hemodialise",
"hemodialise"
]
df_treatment = df[df.redcap_repeat_instrument == "evolucao_tratamento_multiplo"]
df_treatment = df_treatment[["record_id"] + cols_treatment]
# Note that the treatment dataframe has three dates. We will separate the columns accordingly
# just as we did for the clinical dataframe.
df_ventilacao = df_treatment[[
"record_id",
"data_ventilacao",
"ventilacao",
"pao2_fio2"
]]
df_pronacao = df_treatment[[
"record_id",
"data_pronacao",
"pronacao"
]]
df_hemodialise = df_treatment[[
"record_id" ,
"data_hemodialise",
"hemodialise"
]]
# Laboratory results dataframe
cols_laboratory = [
"leucocitos",
"linfocitos",
"neutrofilos",
"tgp",
"creatinina",
"pcr",
"d_dimero",
"il_6",
"plaquetas",
"rni",
"troponina",
"pro_bnp",
"bicarbonato",
"lactato"
]
df_laboratory = df[df.redcap_repeat_instrument == "evolucao_laboratorial_multiplo"]
df_laboratory = df_laboratory[["record_id", "data_resultados_lab"] + cols_laboratory]
# Image dataframe
cols_image = [
"uid_imagem",
"tipo_imagem",
"data_imagem",
"padrao_imagem_rsna",
"score_tc_dir_sup",
"score_tc_dir_med",
"score_tc_dir_inf",
"score_tc_esq_sup",
"score_tc_esq_med",
"score_tc_esq_inf"
]
df_image = df[df.redcap_repeat_instrument == "evolucao_imagem_multiplo"]
df_image.uid_imagem = df_image.uid_imagem.apply(lambda s: s.strip() if pd.notna(s) else s)
df_image = df_image[["record_id", "redcap_repeat_instance"] + cols_image]
df_image = pd.merge(
left=df_personal_data[["record_id", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"]],
right=df_image,
how="right",
on="record_id",
validate="one_to_many"
)
uids_internados = set(df_image[df_image.data_admissao_hospitalar.notna()].uid_imagem.unique())
# For images, we also have the data retrieved from the deep segmentation model. We need
# to enrich our dataframe with the percentages of healthy lung, lung affected by ground-glass
# opacity and consolidation, and the amount of fat in the patient's body.
cols_series_id = [
"record_id",
"redcap_repeat_instance",
"infer_series_id"
]
df_series_id = pd.read_csv(SERIES_ID_FPATH, sep=";")
df_series_id = df_series_id[cols_series_id]
df_series_id = df_series_id.drop_duplicates()
cols_segmentation = [
"UID_Plataforma",
"series_id",
"seg_consolidacao",
"seg_normal",
"seg_vf1",
"seg_vf2",
"seg_vf3",
"volume_pulmao",
"taxa_gordura",
"volume_gordura",
"mediastino"
]
tmp_data = []
df_seg_raw = pd.read_csv(SEGMENTATION_FPATH)
df_seg_raw = df_seg_raw[cols_segmentation]
df_seg_raw = df_seg_raw[df_seg_raw.volume_pulmao >= 1.]
df_seg_raw = pd.merge(left=df_series_id, right=df_seg_raw, left_on="infer_series_id", right_on="series_id", how="right")
# Each TC study might have multiple series. We need to select the one with the largest lung
# volume, preferring series flagged as mediastinal when available.
grouped = df_seg_raw.groupby("UID_Plataforma")
for uid_imagem, group in grouped:
if any(group.mediastino):
use_group = group[group.mediastino]
else:
use_group = group
sorted_group = use_group.sort_values("volume_pulmao")
tmp_data.append(
dict(sorted_group.iloc[-1])
)
df_seg = pd.DataFrame(tmp_data)
df_seg = df_seg[df_seg.seg_normal.notna()]
df_image = pd.merge(
left=df_image,
right=df_seg,
how="left",
on=["record_id", "redcap_repeat_instance"]
)
df_image[
["record_id", "redcap_repeat_instance", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"] + cols_image
].to_csv(os.path.join(BASE_DIR, "data", "TC_scans.csv"), index=False)
df_image = df_image.rename(columns={"redcap_repeat_instance": "redcap_repeat_instance_image"})
df_matches = df_image[
(df_image.seg_normal.notna()) & (df_image.data_admissao_hospitalar.notna())
]
df_matches[
["record_id", "data_admissao_hospitalar", "instituicao", "data_imagem", "uid_imagem"]
].to_csv(os.path.join(BASE_DIR, "data", "matches.csv"), index=False)
n_matches = df_matches.uid_imagem.nunique()
print(f"{n_matches} matches between REDCap and segmentation\n")
# COVID-19 confirmation dataframe
df_confirmation = df[df.redcap_repeat_instrument == "confirmacao_covid_multiplo"]
############################## Finished processing the results data ##############################
# Now we are going to create a dataframe that each row corresponds to a moment in the patient stay at the
# hospital. For each date in the patient history, we will update the row with the latest information about
# that patient.
# First, we need to define some helper functions to work on the processing of the data.
def get_group(grouped, key, default_columns):
"""
Gets a group by key from a Pandas Group By object. If the key does not exist, returns an empty
group with the default columns.
"""
if key in grouped.groups:
group = grouped.get_group(key)
else:
group = pd.DataFrame([], columns=default_columns)
return group
def last_register_before_date(registers, date_col, date, default_columns):
"""
Gets the last register before a reference date in a dataframe. If there are no register before the
date, returns an empty register with the default columns.
"""
registers = registers[registers[date_col].notna()]
registers_before_date = registers[
registers[date_col].apply(parser.parse) <= date
]
if len(registers_before_date) == 0:
registers_before_date = pd.DataFrame([[np.nan for col in default_columns]], columns=default_columns)
last_register = registers_before_date.iloc[-1]
return last_register
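# To make the two helpers above concrete, here is a minimal, self-contained sketch with made-up
# data (the columns "record_id", "data_x" and "x" are hypothetical, not REDCap fields). It is
# defined only for illustration and is never called by the pipeline below.
def _demo_register_helpers():
    toy = pd.DataFrame({
        "record_id": [1, 1, 2],
        "data_x": ["2020-05-01", "2020-05-10", "2020-05-03"],
        "x": [10, 20, 30],
    })
    grouped_toy = toy.groupby("record_id")
    # Unknown key -> empty frame with the default columns
    assert len(get_group(grouped_toy, 99, toy.columns)) == 0
    # Last register of patient 1 on or before 2020-05-05 -> the row dated 2020-05-01
    last = last_register_before_date(
        get_group(grouped_toy, 1, toy.columns),
        "data_x",
        parser.parse("2020-05-05"),
        toy.columns,
    )
    return last["x"]  # expected: 10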
# Then, we need to group all the dataframes we built previously by patient.
grouped_dispneia = df_dispneia.groupby("record_id")
grouped_sofa = df_sofa.groupby("record_id")
grouped_saturacao_o2 = df_saturacao_o2.groupby("record_id")
grouped_saps_3 = df_saps_3.groupby("record_id")
grouped_image = df_image.groupby("record_id")
grouped_laboratory = df_laboratory.groupby("record_id")
grouped_ventilacao = df_ventilacao.groupby("record_id")
grouped_pronacao = df_pronacao.groupby("record_id")
grouped_hemodialise = df_hemodialise.groupby("record_id")
# Now we iterate over the personal data dataframe, which has one row per patient.
after_discharge = []
after_death = []
new_rows = []
for i, row in tqdm(df_personal_data.iterrows(), total=len(df_personal_data)):
record_id = row["record_id"]
institution = row["instituicao"]
hospitalization_date = row["data_admissao_hospitalar"]
discharge_date = row["data_alta_hospitalar"]
date_of_death = row["data_obito"]
if pd.notna(date_of_death):
date_of_death = parser.parse(date_of_death)
if pd.notna(discharge_date):
discharge_date = parser.parse(discharge_date)
if pd.notna(hospitalization_date):
hospitalization_date = parser.parse(hospitalization_date)
# Get each group and sort by the date
group_dispneia = get_group(
grouped_dispneia, record_id, df_dispneia.columns
).sort_values("data_dispneia")
group_sofa = get_group(
grouped_sofa, record_id, df_sofa.columns
)
group_saturacao_o2 = get_group(
grouped_saturacao_o2, record_id, df_saturacao_o2.columns
)
group_saps_3 = get_group(
grouped_saps_3, record_id, df_saps_3.columns
)
group_image = get_group(
grouped_image, record_id, df_image.columns
)
group_laboratory = get_group(
grouped_laboratory, record_id, df_laboratory.columns
)
group_ventilacao = get_group(
grouped_ventilacao, record_id, df_ventilacao.columns
)
group_pronacao = get_group(
grouped_pronacao, record_id, df_pronacao.columns
)
group_hemodialise = get_group(
grouped_hemodialise, record_id, df_hemodialise.columns
)
# List the dates available for the patient
patient_dates = set(filter(
pd.notna,
list(group_dispneia.data_dispneia) +
list(group_sofa.data_sofa) +
list(group_saturacao_o2.data_saturacao_o2) +
list(group_saps_3.data_saps_3) +
list(group_image.data_imagem) +
list(group_laboratory.data_resultados_lab) +
list(group_ventilacao.data_ventilacao) +
list(group_pronacao.data_pronacao) +
list(group_hemodialise.data_hemodialise)
))
patient_dates = funcy.lmap(parser.parse, patient_dates)
# Now we iterate over the dates of the patient retrieving the last register for
# each group.
new_patient_rows = []
for date_tmp in patient_dates:
# If the date is after the patient's death or the patient's discharge, we want to ignore
# the register.
if abs(date_tmp.year - dataset_date.year) > 0:
continue
if pd.notna(date_of_death) and date_tmp > date_of_death:
after_death.append(record_id)
continue
if pd.notna(discharge_date) and date_tmp > discharge_date:
after_discharge.append(record_id)
continue
last_register_dispneia = last_register_before_date(group_dispneia, "data_dispneia", date_tmp, df_dispneia.columns)
last_register_sofa = last_register_before_date(group_sofa, "data_sofa", date_tmp, df_sofa.columns)
last_register_saturacao_o2 = last_register_before_date(group_saturacao_o2, "data_saturacao_o2", date_tmp, df_saturacao_o2.columns)
last_register_saps_3 = last_register_before_date(group_saps_3, "data_saps_3", date_tmp, df_saps_3.columns)
last_register_image = last_register_before_date(group_image, "data_imagem", date_tmp, df_image.columns)
last_register_laboratory = last_register_before_date(group_laboratory, "data_resultados_lab", date_tmp, df_laboratory.columns)
last_register_pronacao = last_register_before_date(group_pronacao, "data_pronacao", date_tmp, df_pronacao.columns)
last_register_hemodialise = last_register_before_date(group_hemodialise, "data_hemodialise", date_tmp, df_hemodialise.columns)
# Need for mechanical ventilation is one of our target variables. Thus, we do not want to get the last register before the
# current date. We want to know if the patient ever needed mechanical ventilation at any point in time.
ventilacao = group_ventilacao[group_ventilacao.ventilacao == group_ventilacao.ventilacao.max()].sort_values("data_ventilacao", ascending=False)
if len(ventilacao) == 0:
ventilacao = pd.DataFrame([[np.nan for col in group_ventilacao.columns]], columns=group_ventilacao.columns)
ventilacao = ventilacao.iloc[-1]
new_row = {}
new_row.update(row)
new_row.update(dict(last_register_dispneia))
new_row.update(dict(last_register_sofa))
new_row.update(dict(last_register_saturacao_o2))
new_row.update(dict(last_register_saps_3))
new_row.update(dict(last_register_image))
new_row.update(dict(last_register_laboratory))
new_row.update(dict(last_register_pronacao))
new_row.update(dict(last_register_hemodialise))
new_row.update(dict(ventilacao))
new_row["data"] = date_tmp
new_row["record_id"] = record_id
new_row["instituicao"] = institution
new_row["dias_desde_admissao"] = (date_tmp - hospitalization_date).days if pd.notna(hospitalization_date) else np.nan
date_of_outcome = date_of_death if pd.notna(date_of_death) else discharge_date
new_row["dias_antes_desfecho"] = (date_of_outcome - date_tmp).days if pd.notna(date_of_outcome) else np.nan
new_patient_rows.append(new_row)
new_rows.extend(new_patient_rows)
df_final = pd.DataFrame(new_rows)
# We need to calculate some dummy variables for the categorical data.
padrao_rsna_dummies = pd.get_dummies(df_final.padrao_imagem_rsna, prefix="padrao_rsna")
ventilacao_dummies = pd.get_dummies(df_final.ventilacao, prefix="ventilacao")
neoplasia_dummies = pd.get_dummies(df_final.neoplasia, prefix="neoplasia")
irc_dummies = | pd.get_dummies(df_final.irc, prefix="irc") | pandas.get_dummies |
import os
from pathlib import Path
import random
import time
import math
import json
import shutil
import inspect
import warnings
import logging
import functools
from concurrent.futures import ThreadPoolExecutor
import dask
from dask.diagnostics import ProgressBar
from copy import deepcopy
from tqdm import tqdm
import numpy as np
import pandas as pd
from PIL import Image
import torch
import rasterio
import rtree
import pyproj
import cv2
import scipy.ndimage
import skimage.morphology
import shapely
import geopandas as gpd
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torchvision.ops import nms
from deepforest import main, preprocess, predict, visualize
from urbantree.setting import Setting
def distance(points):
"""
calculate the distance of two points
Parameters
----------
points : list
a list of two points. E.g. `[[x1, y1], [x2, y2]]`
Returns
-------
float
the distance of two points
"""
p1, p2 = points
return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))
def calc_rectangle_bbox(points, img_h, img_w):
"""
calculate bbox from a rectangle.
Parameters
----------
points : list
a list of two points. E.g. `[[x1, y1], [x2, y2]]`
img_h : int
maximal image height
img_w : int
maximal image width
Returns
-------
dict
corresponding bbox. I.e. `{ 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax }`
"""
lt, rb = points
xmin, ymin = lt
xmax, ymax = rb
xmin = min(max(0, xmin), img_w)
xmax = min(max(0, xmax), img_w)
ymin = min(max(0, ymin), img_h)
ymax = min(max(0, ymax), img_h)
return { 'xmin':xmin, 'ymin':ymin, 'xmax':xmax, 'ymax':ymax }
def calc_circle_bbox(points, img_h, img_w):
"""
calculate bbox from a circle.
Parameters
----------
points : list
a list of two points. E.g. `[[x1, y1], [x2, y2]]`
img_h : int
maximal image height
img_w : int
maximal image width
Returns
-------
dict
corresponding bbox. I.e. `{ 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax }`
"""
center = points[0]
dist = distance(points)
xmin = center[0] - dist
xmax = center[0] + dist
ymin = center[1] - dist
ymax = center[1] + dist
xmin = min(max(0, xmin), img_w)
xmax = min(max(0, xmax), img_w)
ymin = min(max(0, ymin), img_h)
ymax = min(max(0, ymax), img_h)
return { 'xmin':xmin, 'ymin':ymin, 'xmax':xmax, 'ymax':ymax }
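# A small illustrative check of the geometry helpers above. The coordinates are made up and the
# function is never called by generate_response(); it is only a sketch of the expected output.
def _demo_bbox_helpers():
    # 3-4-5 triangle: the distance between (0, 0) and (3, 4) is 5
    assert distance([[0, 0], [3, 4]]) == 5.0
    # A circle annotated by its center (10, 10) and an edge point 5 px away becomes a 10x10 box,
    # clipped to the image bounds when necessary.
    bbox = calc_circle_bbox([[10, 10], [13, 14]], img_h=100, img_w=100)
    return bbox  # expected: {'xmin': 5.0, 'ymin': 5.0, 'xmax': 15.0, 'ymax': 15.0}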
def generate_response(dataset_train_dir, dataset_response_dir,
model_trainer_min_bbox_size,
model_trainer_min_bbox_ratio,
model_trainer_validation_ratio,
model_trainer_patch_sizes=[],
model_trainer_patch_overlap_size=32,
**ignored):
"""
generate response of training dataset with labelme annotation results.
Parameters
----------
dataset_train_dir : str
the path to input training dataset folder with labelme annotation results in json
dataset_response_dir : str
the path to output training response folder for further training with torch vision
model_trainer_min_bbox_size : int
minimal size of object bbox which should be trained
model_trainer_min_bbox_ratio : float
minimal ratio (short_side/long_side) of object bbox
model_trainer_validation_ratio : float
train/validation split ratio
model_trainer_patch_sizes : list of int
a list of patch sizes for training images
model_trainer_patch_overlap_size : int
overlapping size of cropped training images with the given patch size
"""
TRAINING_IMG_DIR = Path(dataset_train_dir)
def glob_json(path):
return list(path.glob('*.json'))
# TRAINING_IMG_DIR can be a list of directories
LABELME_RESULTS = [glob_json(TRAINING_IMG_DIR)] \
if not isinstance(TRAINING_IMG_DIR, list) \
else list(map(glob_json, TRAINING_IMG_DIR))
# output folder of response (annotation csv for torch vision)
RESPONSE_DEEPFOREST_DIR = Path(dataset_response_dir)
PATCH_SIZES = model_trainer_patch_sizes
PATCH_OVERLAP_SIZE = model_trainer_patch_overlap_size
VALIDATION_RATIO = model_trainer_validation_ratio
TARGET_MIN_SIZE = model_trainer_min_bbox_size
TARGET_MIN_RATIO = model_trainer_min_bbox_ratio
assert TARGET_MIN_RATIO <= 1 and TARGET_MIN_RATIO > 0, "0 < model_trainer_min_bbox_ratio <= 1"
#############################################################
# need to start from scratch
if RESPONSE_DEEPFOREST_DIR.exists() and os.listdir(RESPONSE_DEEPFOREST_DIR):
raise RuntimeError("Directory is not empty: " + str(RESPONSE_DEEPFOREST_DIR))
os.makedirs(RESPONSE_DEEPFOREST_DIR, exist_ok=True)
def bbox_size(bbox):
return (bbox['xmax'] - bbox['xmin']) * (bbox['ymax'] - bbox['ymin'])
def bbox_ratio(bbox):
return (bbox['xmax'] - bbox['xmin']) / (bbox['ymax'] - bbox['ymin'])
print("Processing source labelme annotation:", TRAINING_IMG_DIR)
all_df = []
for dir_id, label_jsons in enumerate(LABELME_RESULTS):
for lbme in label_jsons:
# labelme json file
lbme_path = Path(lbme)
image_json = None
with open(lbme) as f:
image_json = json.load(f)
img_h = image_json['imageHeight']
img_w = image_json['imageWidth']
img_dir = lbme.parent
src_img_name = image_json['imagePath']
dest_img_name = str(dir_id) + '-' + src_img_name
# copy training data to response folder
shutil.copy(img_dir.joinpath(src_img_name),
RESPONSE_DEEPFOREST_DIR.joinpath(dest_img_name))
rows_list = []
# process geometries
for shape in image_json['shapes']:
shape_type = shape['shape_type']
shape_label = shape['label']
shape_points = shape['points']
bbox = None
if shape_type == 'circle':
bbox = calc_circle_bbox(points=shape_points, img_h=img_h, img_w=img_w)
elif shape_type == 'rectangle':
bbox = calc_rectangle_bbox(points=shape_points, img_h=img_h, img_w=img_w)
else:
raise ValueError("unsupported shape type: " + shape_type)
if (bbox_size(bbox) >= TARGET_MIN_SIZE) and \
((bbox_ratio(bbox) >= TARGET_MIN_RATIO) and (bbox_ratio(bbox) <= 1.0/TARGET_MIN_RATIO)):
row = { 'image_path': dest_img_name, **bbox, 'label': shape_label }
rows_list.append(row)
df = pd.DataFrame(rows_list)
dest = RESPONSE_DEEPFOREST_DIR / (str(dir_id) + '-' + lbme_path.stem + '.csv')
df.to_csv(dest, index=False)
all_df.append(df)
if len(all_df):
print("output response dir:", RESPONSE_DEEPFOREST_DIR)
# collect all annotation
combined_annotations = pd.concat(all_df, ignore_index=True)
combined_annotations.to_csv(RESPONSE_DEEPFOREST_DIR / "combined.all.csv_", index=False)
# random split to train/valid set
image_paths = combined_annotations.image_path.unique()
valid_paths = np.random.choice(image_paths, int(len(image_paths)*VALIDATION_RATIO))
valid_annotations = combined_annotations.loc[combined_annotations.image_path.isin(valid_paths)]
train_annotations = combined_annotations.loc[~combined_annotations.image_path.isin(valid_paths)]
train_annotations.to_csv(RESPONSE_DEEPFOREST_DIR / "combined.train.csv_", index=False)
valid_annotations.to_csv(RESPONSE_DEEPFOREST_DIR / "combined.valid.csv_" , index=False)
# cropping
print("Cropping response dataset:", RESPONSE_DEEPFOREST_DIR)
all_df = []
for _, PATCH_SIZE in enumerate(PATCH_SIZES):
for annotation_file in list(RESPONSE_DEEPFOREST_DIR.glob("*.csv")):
path_to_raster = RESPONSE_DEEPFOREST_DIR.joinpath(annotation_file.stem + ".tiff")
CROP_BASE_DIR = RESPONSE_DEEPFOREST_DIR / "crop" / str(PATCH_SIZE)
df = preprocess.split_raster(annotations_file=annotation_file,
path_to_raster=path_to_raster,
base_dir=CROP_BASE_DIR,
patch_size=PATCH_SIZE,
patch_overlap=1.0 * PATCH_OVERLAP_SIZE / PATCH_SIZE)
df['image_path'] = str(PATCH_SIZE) + '/' + df['image_path']
all_df.append(df)
# collect all annotation
if len(all_df):
print("output response dir:", (RESPONSE_DEEPFOREST_DIR / "crop"))
combined_annotations = | pd.concat(all_df, ignore_index=True) | pandas.concat |
"""WISDM dataset
URL of dataset: https://www.cis.fordham.edu/wisdm/includes/datasets/latest/WISDM_ar_latest.tar.gz
"""
import re
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Optional, Union, List, Tuple
from ..core import split_using_target, split_using_sliding_window
from .base import BaseDataset, check_path
__all__ = ['WISDM', 'load', 'load_raw']
# Meta Info
SUBJECTS = tuple(range(1, 36+1))
ACTIVITIES = tuple(['Walking', 'Jogging', 'Sitting', 'Standing', 'Upstairs', 'Downstairs'])
Sampling_Rate = 20 # Hz
class WISDM(BaseDataset):
"""
Load the sensor data and metadata recorded in the WISDM dataset.
Parameters
----------
path: Path
Path to the WISDM dataset.
"""
def __init__(self, path:Path):
super().__init__(path)
def load(self, window_size:int, stride:int, ftrim_sec:int=3, btrim_sec:int=3, subjects:Optional[list]=None) -> Tuple[np.ndarray, np.ndarray]:
"""
Load the WISDM dataset and return the data after sliding-window processing.
Parameters
----------
window_size: int
sample size of each frame
stride: int
step size of the sliding window
ftrim_sec: int
seconds trimmed from the head of each segment
btrim_sec: int
seconds trimmed from the tail of each segment
subjects: Optional[list]
Subjects to load. If not specified, data of all subjects is returned.
There are 36 subjects in total, each assigned an integer ID in the range [1, 36].
Returns
-------
(x_frames, y_frames): Tuple[np.ndarray, np.ndarray]
Input and target frames extracted with the sliding window.
x_frames is a 3D array shaped roughly as (Batch, Channels, Frame).
Channels are the accelerometer axes, in the order x, y, z.
y_frames is a 2D array shaped roughly as (Batch, Labels).
Labels are, in order, activity and subject.
Note that y_frames contains the raw values from the dataset, so labels must be reassigned before use in classification.
Examples
--------
>>> wisdm_path = Path('path/to/dataset')
>>> wisdm = WISDM(wisdm_path)
>>>
>>> # Load only subjects 1, 2, and 3
>>> x, y = wisdm.load(window_size=256, stride=256, ftrim_sec=0, btrim_sec=0, subjects=[1, 2, 3])
>>> print(f'x: {x.shape}, y: {y.shape}')
>>>
>>> # > x: (?, 3, 256), y: (?, 2)
"""
segments, meta = load(path=self.path)
segments = [m.join(seg) for seg, m in zip(segments, meta)]
x_frames, y_frames = [], []
for seg in segments:
fs = split_using_sliding_window(
np.array(seg), window_size=window_size, stride=stride,
ftrim=Sampling_Rate*ftrim_sec, btrim=Sampling_Rate*btrim_sec,
return_error_value=None)
if fs is not None:
x_frames += [fs[:, :, 3:]]
y_frames += [np.uint8(fs[:, 0, 0:2][..., ::-1])] # this should reorder the labels to (activity, subject)
else:
# print('no frame')
pass
x_frames = np.concatenate(x_frames).transpose([0, 2, 1])
y_frames = np.concatenate(y_frames)
# subject filtering
if subjects is not None:
flags = np.zeros(len(x_frames), dtype=bool)
for sub in subjects:
flags = np.logical_or(flags, y_frames[:, 1] == sub)
# flags = np.logical_or(flags, y_frames[:, 0] == sub)
x_frames = x_frames[flags]
y_frames = y_frames[flags]
return x_frames, y_frames
def load(path:Union[Path,str]) -> Tuple[List[pd.DataFrame], List[pd.DataFrame]]:
"""Function for loading WISDM dataset
Parameters
----------
path: Union[Path, str]
Directory path of WISDM dataset('data' directory)
Returns
-------
data, meta: List[pd.DataFrame], List[pd.DataFrame]
Sensor data segmented by activity and subject
See Also
--------
The order of 'data' and 'meta' corresponds:
e.g. meta[0] is the metadata of data[0].
"""
path = check_path(path)
raw = load_raw(path)
data, meta = reformat(raw)
return data, meta
def load_raw(path:Path) -> pd.DataFrame:
"""Function for loading raw data of WISDM dataset
Parameters
----------
path: Path
Directory path of WISDM dataset('data' directory)
Returns
-------
raw_data : pd.DataFrame
raw data of WISDM dataset
See Also
--------
Structure of one segment:
np.ndarray([
[user id, activity id, timestamp, x-acceleration, y-acceleration, z-acceleration],
[user id, activity id, timestamp, x-acceleration, y-acceleration, z-acceleration],
...,
[user id, activity id, timestamp, x-acceleration, y-acceleration, z-acceleration],
], dtype=float64))
Range of activity label: [0, 5]
Range of subject label : [1, 36]
"""
path = path / 'WISDM_ar_v1.1_raw.txt'
with path.open('r') as fp:
whole_str = fp.read()
# Perform a simple parse while accounting for the malformed records in the dataset
# [basic structure]
# [user],[activity],[timestamp],[x-acceleration],[y-accel],[z-accel];
# [malformed records]
# - a stray comma appears right before the ";"
# - the ";" is missing
# - z-acceleration is missing (probably only in one place)
whole_str = whole_str.replace(',;', ';')
semi_separated = re.split('[;\n]', whole_str)
semi_separated = list(filter(lambda x: x != '', semi_separated))
comma_separated = [r.strip().split(',') for r in semi_separated]
# debug
for s in comma_separated:
if len(s) != 6:
print('[miss format?]: {}'.format(s))
raw_data = pd.DataFrame(comma_separated)
raw_data.columns = ['user', 'activity', 'timestamp', 'x-acceleration', 'y-acceleration', 'z-acceleration']
# There is exactly one row whose z-acceleration value is ''
# It would raise an error during the type cast below, so replace it with nan first
raw_data['z-acceleration'] = raw_data['z-acceleration'].replace('', np.nan)
# convert activity name to activity id
raw_data = raw_data.replace(list(ACTIVITIES), list(range(len(ACTIVITIES))))
raw_data = raw_data.astype({'user': 'uint8', 'activity': 'uint8', 'timestamp': 'uint64', 'x-acceleration': 'float64', 'y-acceleration': 'float64', 'z-acceleration': 'float64'})
raw_data[['x-acceleration', 'y-acceleration', 'z-acceleration']] = raw_data[['x-acceleration', 'y-acceleration', 'z-acceleration']].fillna(method='ffill')
return raw_data
def reformat(raw) -> Tuple[List[pd.DataFrame], List[pd.DataFrame]]:
"""Function for reformating
Parameters
----------
raw:
data loaded by 'load_raw'
Returns
-------
data, meta: List[pd.DataFrame], List[pd.DataFrame]
Sensor data segmented by activity and subject
See Also
--------
The order of 'data' and 'meta' corresponds:
e.g. meta[0] is the metadata of data[0].
"""
raw_array = raw.to_numpy()
# split into segments (by user and activity)
sdata_splited_by_subjects = split_using_target(src=raw_array, target=raw_array[:, 0])
segments = []
for sub_id in sdata_splited_by_subjects.keys():
for src in sdata_splited_by_subjects[sub_id]:
splited = split_using_target(src=src, target=src[:, 1])
for act_id in splited.keys():
segments += splited[act_id]
segments = list(map(lambda seg: pd.DataFrame(seg, columns=raw.columns).astype(raw.dtypes.to_dict()), segments))
data = list(map(lambda seg: | pd.DataFrame(seg.iloc[:, 3:], columns=raw.columns[3:]) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Copyright (C) Wise.io, Inc. 2016.
import numpy as np
import pandas.util.testing
import unittest
import collections
import pandas
import paratext_internal
import os
import random
import sys
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from six.moves import range
import six
def generate_hell_frame(num_rows, num_columns, include_null=False, fmt='arbitrary'):
"""
Generate a DataFrame of columns containing randomly generated data.
"""
if include_null:
min_byte = 0
else:
min_byte = 1
frame = collections.OrderedDict()
seed = 0
keys = []
colfmts = {}
for column in range(num_columns):
key = "col%d" % (column,)
keys.append(key)
if fmt == 'mixed':
colfmts[key] = random.choice(["ascii","arbitrary","printable_ascii","utf-8"])
else:
colfmts[key] = fmt
for key in keys:
data = []
colfmt = colfmts[key]
for row in range(num_rows):
length = np.random.randint(50,1000)
if colfmt == 'arbitrary':
cell = paratext_internal.get_random_string(length, seed, min_byte, 255)
elif colfmt == 'ascii':
cell = paratext_internal.get_random_string(length, seed, min_byte, 127)
elif colfmt == 'printable_ascii':
cell = paratext_internal.get_random_string(length, seed, 32, 126)
elif colfmt == 'utf-8' or fmt == 'utf-8':
cell = paratext_internal.get_random_string_utf8(length, seed, include_null)
else:
raise ValueError("unknown format: " + fmt)
data.append(cell)
frame[key] = data
return pandas.DataFrame(frame)
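# Hedged usage sketch for generate_hell_frame: the shapes and the 'ascii' format below are
# illustrative choices, not values mandated by the test suite, and the helper is defined but
# never called at import time.
def _example_hell_frame():
    frame = generate_hell_frame(num_rows=5, num_columns=2, include_null=False, fmt='ascii')
    # one randomly generated string column per requested column, five rows each
    return frame.shape  # expected: (5, 2)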
@contextmanager
def generate_tempfile(filedata):
"""
A context manager that generates a temporary file object that will be deleted
when the context goes out of scope. The mode of the file is "wb".
Parameters
----------
filedata : The data of the file to write as a bytes object.
"""
f = NamedTemporaryFile(delete=False, mode="wb", prefix="paratext-tests")
f.write(filedata)
name = f.name
f.close()
yield f.name
os.remove(name)
@contextmanager
def generate_tempfilename():
"""
A context manager that generates a temporary filename that will be deleted
when the context goes out of scope.
"""
f = NamedTemporaryFile(delete=False, prefix="paratext-tests")
name = f.name
f.close()
yield f.name
os.remove(name)
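# Minimal usage sketch for the two context managers above (illustrative only, not part of the
# original suite): the file or filename exists inside the 'with' block and is removed on exit.
def _example_tempfile_usage():
    with generate_tempfile(b"a,b\n1,2\n") as fname:
        size = os.path.getsize(fname)  # size of the bytes written above
    with generate_tempfilename() as fname2:
        existed = os.path.exists(fname2)  # True: NamedTemporaryFile created the file
    return size, existed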
def assert_seq_almost_equal(left, right):
left = np.asarray(left)
right = np.asarray(right)
left_is_string = np.issubdtype(left.dtype, np.str_) or np.issubdtype(left.dtype, np.unicode_) or left.dtype == np.object_
right_is_string = np.issubdtype(right.dtype, np.str_) or np.issubdtype(right.dtype, np.unicode_) or right.dtype == np.object_
if np.issubdtype(left.dtype, np.integer) and np.issubdtype(right.dtype, np.integer):
if not (left.shape == right.shape):
raise AssertionError("integer sequences have different sizes: %s vs %s" % (str(left.shape), str(right.shape)))
if not (left == right).all():
m = (left != right).mean() * 100.
raise AssertionError("integer sequences mismatch: %5.5f%% left=%s right=%s" % ((m, str(left[0:20]), str(right[0:20]))))
elif np.issubdtype(left.dtype, np.floating) and np.issubdtype(right.dtype, np.floating):
np.testing.assert_almost_equal(left, right)
elif left_is_string and not right_is_string:
if len(left) > 0 and len(right) > 0:
raise AssertionError("sequences differ by dtype: left is string and right is %s" % (str(right.dtype)))
elif not left_is_string and right_is_string:
if len(left) > 0 and len(right) > 0:
raise AssertionError("sequences differ by dtype: left is %s and right is string" % (str(left.dtype)))
elif left_is_string and right_is_string:
q = np.zeros((len(left)))
for i in range(len(q)):
q[i] = not paratext_internal.are_strings_equal(left[i], right[i])
m = q.mean() * 100.
if q.any():
raise AssertionError("object sequences mismatch: %5.5f%%, rows: %s" % (m, str(np.where(q)[0].tolist())))
else:
left_float = np.asarray(left, dtype=np.float_)
right_float = np.asarray(right, dtype=np.float_)
#if np.issubdtype(left.dtype, np.floating):
# left_float = left
#else:
#if np.issubdtype(right.dtype, np.floating):
# right_float = right
#else:
| pandas.util.testing.assert_almost_equal(left_float, right_float) | pandas.util.testing.assert_almost_equal |
import argparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from configparser import ConfigParser
from bs4 import BeautifulSoup
import pandas as pd
import sys
import re
import os
import datetime
import tldextract
import csv
import json
import pyfiglet
import requests
import random
from collections import OrderedDict
from fnmatch import fnmatch
import ocrmypdf
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
import hashlib
## custom functions
from utilities.helpers import (makedirs, prep_request, write_csv, clean_string, text_excerpt, compress_text, pointcalc, write_file, clean_json, headers_all)
# process arguments
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--process", dest="check", default="csv", help="Specify what gets processed. '-p update' checks existing urls; '-p csv' reads urls from a csv.")
args = parser.parse_args()
whattodo = args.check
if whattodo == "csv":
# specify source file
# file should be a csv with 4 columns: 'source_urls','opening', 'middle', 'closing'
# 'opening' is the first few words where the relevant text begins
# 'closing' is the final words of the relevant text
# 'middle' is a snippet in the middle of the relevant text
source_file = 'source/big_test.csv'
elif whattodo == "update":
pass
else:
sys.exit("Specify source of urls. '-p update' checks existing urls; '-p csv' reads urls from a csv")
character = 100 # set minimum character count for text
cutoff = 6 # scoring threshold - 7 is currently max
count = 0
bad_urls = []
# specify profile with enabled browser extensions
'''
testing_profile = FirefoxProfile("/home/username/.mozilla/firefox/profile_name")
binary = FirefoxBinary("/usr/bin/firefox")
driver = webdriver.Firefox(firefox_profile=testing_profile, firefox_binary=binary)
ext_dir = '/home/username/.mozilla/firefox/profile_name/extensions/'
extensions = [
'<EMAIL>',
]
for e in extensions:
driver.install_addon(ext_dir + e, temporary=True)
'''
#uncomment to use standard gecko driver
driver = webdriver.Firefox()
# Create output directories
base = 'archive' # use for archiving policies
full_html = "full"
snippet_html = "snippet"
clean_text_dir = "text"
supporting_files = "files"
media = "media"
url_data = "url_data"
d = datetime.datetime.today()
year = d.strftime("%Y")
month = d.strftime("%m")
day = d.strftime("%d")
date_filename = year + "_" + month + "_" + day
#######################
#define dataframes
df_language = pd.DataFrame(columns=['check', 'text_len', 'text', 'mu_len', 'markup_snippet', 'full_page_len'])
thank_you = pd.DataFrame(columns=['source_urls', 'opening', 'middle', 'closing'])
#######################
## Let's get started ##
#######################
# get key data from existing url archives
makedirs(url_data)
makedirs(media)
file_ext = "*.json"
all_files = []
all_hash = []
all_urls = []
for path, subdirs, files in os.walk(url_data):
for f in files:
print("Processing " + f)
if fnmatch(f,file_ext):
appdata = os.path.join(path,f)
all_files.append(f)
with open(appdata) as input:
data = json.load(input)
all_urls.append(data['url'])
all_hash.append(data['text_hash'])
if data['current'] == "yes" and whattodo == "update":
url = data['url']
try:
opening = data['first']
except:
opening = ""
try:
middle = data['middle']
except:
middle = ""
try:
closing = data['last']
except:
closing = ""
ty_obj = | pd.Series([url, opening, middle, closing], index=thank_you.columns) | pandas.Series |
# ▣ 2.1 The 1.usa.gov data from bit.ly
path = 'PythonForDataAnalysis/ch02/usagov_bitly_data2012-03-16-1331923249.txt'
print(open(path).readline())
# - Read the downloaded sample file line by line using the json module's loads function.
import json
path = 'PythonForDataAnalysis/ch02/usagov_bitly_data2012-03-16-1331923249.txt'
records = [json.loads(line) for line in open(path, encoding='utf-8')]
print(records[0])
# - Read a value from an individual item in records easily by passing the key of the value as a string.
print(records[0]['tz'])
# ■ 2.1.1 Counting time zones with pure Python
time_zones = [rec['tz'] for rec in records if 'tz' in rec]
time_zones[:10]
# - Counting with the Python standard library
def get_counts(sequence):
counts = {}
for x in sequence:
if x in counts:
counts[x] += 1
else:
counts[x] = 1
return counts
from collections import defaultdict
def get_counts2(sequence):
counts = defaultdict(int) # values are initialized to 0
for x in sequence:
counts[x] += 1
return counts
counts = get_counts(time_zones)
counts['America/New_York']
len(time_zones)
# - How to find the 10 most frequently occurring time zones
def top_counts(count_dict, n=10):
value_key_pairs = [(count, tz) for tz, count in count_dict.items()]
value_key_pairs.sort(reverse=True)
return value_key_pairs[:n]
top_counts(counts)
# - Counting with the collections.Counter class from the Python standard library
from collections import Counter
counts = Counter(time_zones)
counts.most_common(10)
# ■ 2.1.2 Counting time zones with pandas
from pandas import DataFrame, Series
import pandas as pd
frame = DataFrame(records)
frame.info()
frame['tz'][:10]
# - Counting with the value_counts method of the Series object
tz_counts = frame['tz'].value_counts()
tz_counts[:10]
# - Draw a horizontal bar chart with the plot method
tz_counts[:10].plot(kind='barh', rot=0)
# - The field containing information about the browser, device, and application used to shorten the URL
frame['a'][1]
frame['a'][50]
frame['a'][51]
results = Series([x.split()[0] for x in frame.a.dropna()])
results[:5]
results.value_counts()[:8]
# - Exclude records that have no agent value
cframe = frame[frame.a.notnull()]
# - Check whether each row is Windows or not
import numpy as np
operating_system = np.where(cframe['a'].str.contains('Windows'), 'Windows', 'Not Windows')
operating_system[:5]
by_tz_os = cframe.groupby(['tz', operating_system])
# - Compute the per-group counts with the size function and rearrange the result into a table using unstack.
agg_counts = by_tz_os.size().unstack().fillna(0)
agg_counts[:10]
# - Let's collect the overall ranking of the time zones.
indexer = agg_counts.sum(1).argsort()
indexer[:10]
# - Use take on agg_counts to select the rows in that sorted order, then slice off the last 10 rows.
count_subset = agg_counts.take(indexer)[-10:]
count_subset.plot(kind='barh', stacked=True)
# - Normalize each row so that it sums to 1, then build the table.
normed_subset = count_subset.div(count_subset.sum(1), axis=0)
normed_subset.plot(kind='barh', stacked=True)
# ▣ 2.2 MovieLens movie rating data
import pandas as pd
encoding = 'latin1'
upath = 'PythonForDataAnalysis\\ch02\\users.dat'
rpath = 'PythonForDataAnalysis\\ch02\\ratings.dat'
mpath = 'PythonForDataAnalysis\\ch02\\movies.dat'
unames = ['user_id', 'gender', 'age', 'occupation', 'zip']
rnames = ['user_id', 'movie_id', 'rating', 'timestamp']
mnames = ['movie_id', 'title', 'genres']
users = pd.read_csv(upath, sep='::', header=None, names=unames, encoding=encoding)
ratings = pd.read_csv(rpath, sep='::', header=None, names=rnames, encoding=encoding)
movies = pd.read_csv(mpath, sep='::', header=None, names=mnames, encoding=encoding)
users[:5]
ratings[:5]
movies[:5]
ratings
# - Merge the three tables
data = pd.merge(pd.merge(ratings, users), movies)
data
data.ix[0]
# - Compute each movie's mean rating by gender using the pivot_table method.
| pd.pivot_table() | pandas.pivot_table |
# encoding: utf-8
from __future__ import division
import sys
import os
import time
import datetime
import pandas as pd
import numpy as np
import math
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
ADD_PATH = "%s/../"%(CURRENT_DIR)
sys.path.append(ADD_PATH)
from tools.mail import MyEmail
from tools.html import html_with_style
DATA_PATH = "%s/../data/mysql" % (CURRENT_DIR)
send_str = ''
today = datetime.datetime.today()
dt = today + datetime.timedelta(days = -1)
year = str(dt.year)
month = '0' + str(dt.month) if dt.month < 10 else str(dt.month)
day = '0' + str(dt.day) if dt.day < 10 else str(dt.day)
# load data
df_regist = | pd.read_csv(DATA_PATH+'/regist.'+year+'-'+month+'-'+day, encoding = 'utf-8') | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
#Import Data
mbhmms_card = pd.read_csv("../s01-card/MBhmms/201006-MBhmms_pr_analysis.txt",sep='\t',skiprows=2,usecols=[0,4,5],names=["Family","Precision", "Recall"])
mbhmms_card["Database"] = "Resfams 2.0"
oldres_card = pd.read_csv("../s01-card/OldRes/201006-res_pr_analysis.txt",sep='\t',skiprows=2,usecols=[0,4,5],names=["Family","Precision", "Recall"])
oldres_card["Database"] = "Resfams 1.0"
mbhmms_ncbi = | pd.read_csv("../s02-ncbi/MBHmms/201007-MBHmms_pr_analysis.txt",sep='\t',skiprows=2,usecols=[0,4,5],names=["Family","Precision", "Recall"]) | pandas.read_csv |
# This file can download the rarity data from the website rarity.tools
# There is a hidden API - "https://projects.rarity.tools/static/staticdata/<project_name>.json" - that we can use to download rarity data in seconds.
# However, the data is in raw format, so we need to recalculate the scoring using the algorithm below.
# Without Traits Normalization, Rarity tools use the formula Rarity Score = 1 / (Count Of The Trait Value / Total Number Of Tokens)
# With Traits Normalization, Rarity tools use the formula Rarity Score = (Constant Number / (Number Of Trait Types X Number Of Categories In That Trait Type)) / (Count Of The Trait Value / Total Number Of Tokens)
# The Constant Number is 1,000,000 divided by the Total Number Of Tokens (see constant_number below).
# That constant is an arbitrary number chosen by Rarity tools, I guess; I found it by reverse-engineering a few different samples.
# The calculation will give the exact Rarity Score shown in Rarity tools, with a few exceptions.
# There are some NFT projects for which Rarity tools added Thematic Match / Matching Sets.
# Due to that data irregularity, this script WILL NOT account for these matches (so the scoring for projects with these Thematic Matches might differ).
# Revision: 1.0
# Date: Nov 17, 2021
# by @NFT131 #9693 (Discord Username)
# email: <EMAIL>
# If you need any clarification, email me. We can discuss more.
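# To make the formulas above concrete, here is a small stand-alone sketch of the normalized
# score for a single trait value. It mirrors the arithmetic used inside download() below; the
# example numbers (10,000 tokens, 9 trait types, 7 categories, 25 occurrences) are made up.
def _example_normalized_rarity_score(trait_value_count=25, total_tokens=10000,
                                     num_trait_types=9, num_categories_in_trait=7):
    constant_number = 1000000 / total_tokens
    normalization = constant_number / (num_trait_types * num_categories_in_trait)
    # rarer trait values (smaller counts) yield a larger score
    return normalization / (trait_value_count / total_tokens)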
def download(project_name="vogu", starting_count_y=1, normalize_trait=1):
# The variable "starting_count_y" is usually 1, but in rare cases the count has to start at 2, due to the irregular data structure used by Rarity tools
# Leave "normalize_trait" as default value 1, unless you want to turn it off. (Not recommended to turn it off, as normalize_trait will give better accuracy)
import csv
import requests
import time
import json
from pprint import pprint
import datetime
from utils import config
dt_now = datetime.datetime.utcnow()
start_time = time.time()
print("Project : " + str(project_name))
# Saves the rarity data into the Folder "rarity_data" provided
metadata_attributes_csv_file_name = f"{config.ATTRIBUTES_FOLDER}/{project_name}.csv"
# Saves the rarity data into the Folder "rarity_data" provided
metadata_scoring_csv_file_name = (
f"{config.RARITY_FOLDER}/{project_name}_raritytools.csv"
)
warning_flag = False
url = "https://projects.rarity.tools/static/staticdata/" + project_name + ".json"
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
}
response = requests.request("GET", url, headers=headers)
response_data = response.json()
all_traits = response_data["basePropDefs"]
number_of_traits_types = len(all_traits) - 1
nft_metadata = response_data["items"]
metadata_scoring = {}
metadata_to_save = {}
total_tokens_len = len(nft_metadata)
constant_number = (
1000000 / total_tokens_len
) # This constant number is used to normalize the scoring, I found it by reverse engineering a few samples
rarity_table = {}
# cut off the last element if it is an empty array (it causes problems with the script)
last_element_of_metadata = nft_metadata[0][len(nft_metadata[0]) - 1]
if (
isinstance(last_element_of_metadata, list)
and len(last_element_of_metadata) == 0
):
trailing_count_to_cut = 1
else:
trailing_count_to_cut = 0
nft_metadata_len = len(nft_metadata[0]) - trailing_count_to_cut
# Looping through each token, and decode the metadata stored by rarity tools
# They use Numbers to represent the data, which makes the file size smaller to download
for x in range(0, total_tokens_len):
token_id = str(nft_metadata[x][0])
metadata_to_save.update({token_id: {"nft_traits": []}})
metadata_scoring.update(
{
token_id: {
"TOKEN_ID": token_id,
"TOKEN_NAME": project_name + " #" + str(token_id),
}
}
)
token_rarity_score = 0
token_ranking = 0
this_token_trait = []
each_trait_score = {}
# Looping through each Metadata of the token, and calculating rarity scores
for y in range(starting_count_y, nft_metadata_len):
# There is a chance that Thematic Match info is stored as "derivedPropDefs" from the data query. Skipping this condition.
# This happens while extracting "Wicked Craniums" project
if y >= len(all_traits):
warning_flag = True
break
this_trait_rarity_score = 0
temp_scoring = 0
# Some traits data is stored as List, so we have to loop it through
if isinstance(nft_metadata[x][y], list):
temp_scoring = 0
if len(nft_metadata[x][y]) == 0:
if normalize_trait:
try:
number_of_category = len(all_traits[y]["pvs"])
except:
print(y)
pprint(all_traits[y])
input(
"Error Found on the count above. Press Any key to continue."
)
token_rarity_score = token_rarity_score + (
constant_number
/ (number_of_traits_types * number_of_category)
) / (all_traits[y]["pvs"][0][1] / total_tokens_len)
this_trait_rarity_score = (
constant_number
/ (number_of_traits_types * number_of_category)
) / (all_traits[y]["pvs"][0][1] / total_tokens_len)
temp_scoring += this_trait_rarity_score
else:
token_rarity_score = token_rarity_score + 1 / (
all_traits[y]["pvs"][0][1] / total_tokens_len
)
temp_scoring += this_trait_rarity_score
else:
for each_theme in nft_metadata[x][y]:
if normalize_trait:
try:
number_of_category = len(all_traits[y]["pvs"])
except:
print(y)
pprint(all_traits[y])
input(
"Error Found on the count above. Press Any key to continue."
)
token_rarity_score = token_rarity_score + (
constant_number
/ (number_of_traits_types * number_of_category)
) / (all_traits[y]["pvs"][each_theme][1] / total_tokens_len)
this_trait_rarity_score = (
constant_number
/ (number_of_traits_types * number_of_category)
) / (all_traits[y]["pvs"][each_theme][1] / total_tokens_len)
temp_scoring += this_trait_rarity_score
else:
token_rarity_score = token_rarity_score + 1 / (
all_traits[y]["pvs"][each_theme][1] / total_tokens_len
)
this_trait_rarity_score = 1 / (
all_traits[y]["pvs"][each_theme][1] / total_tokens_len
)
temp_scoring += this_trait_rarity_score
else:
if normalize_trait:
# Skip the traits that doesn't contain keys: pvs -> very unusual
# Was spoted once with byopills project
if all_traits[y].get("pvs", "empty") == "empty":
break
else:
number_of_category = len(all_traits[y]["pvs"])
token_rarity_score = token_rarity_score + (
constant_number
/ (number_of_traits_types * number_of_category)
) / (
all_traits[y]["pvs"][nft_metadata[x][y]][1]
/ total_tokens_len
)
this_trait_rarity_score = (
constant_number
/ (number_of_traits_types * number_of_category)
) / (
all_traits[y]["pvs"][nft_metadata[x][y]][1]
/ total_tokens_len
)
else:
token_rarity_score = token_rarity_score + 1 / (
all_traits[y]["pvs"][nft_metadata[x][y]][1] / total_tokens_len
)
this_trait_rarity_score = 1 / (
all_traits[y]["pvs"][nft_metadata[x][y]][1] / total_tokens_len
)
if isinstance(nft_metadata[x][y], list):
for each_theme in nft_metadata[x][y]:
this_token_trait.append(
{
"node": {
"traitType": all_traits[y]["name"],
"value": all_traits[y]["pvs"][each_theme][0],
}
}
)
each_trait_score.update({all_traits[y]["name"]: temp_scoring})
else:
this_token_trait.append(
{
"node": {
"traitType": all_traits[y]["name"],
"value": all_traits[y]["pvs"][nft_metadata[x][y]][0],
}
}
)
each_trait_score.update(
{all_traits[y]["name"]: this_trait_rarity_score}
)
rarity_table.update({str(token_id): float(token_rarity_score)})
metadata_to_save[token_id].update({"nft_traits": this_token_trait})
metadata_scoring[token_id].update(each_trait_score)
# Sort all the rarity data based on the rarity scores (descending order)
sorted_rarity_table = sorted(
rarity_table.items(), key=lambda item: item[1], reverse=True
)
print("Number of Token : " + str(len(metadata_to_save)))
rarity_table_upload = {}
count = 1
# Add the ranking to each of the tokens
for each_item in range(0, len(sorted_rarity_table)):
rarity_table_upload.update(
{
str(sorted_rarity_table[each_item][0]): {
"rank": count,
"rarity_score": float(sorted_rarity_table[each_item][1]),
}
}
)
metadata_to_save[str(sorted_rarity_table[each_item][0])].update(
{"rank": count, "rarity_score": float(sorted_rarity_table[each_item][1])}
)
metadata_scoring[str(sorted_rarity_table[each_item][0])].update(
{"RARITY_SCORE": float(sorted_rarity_table[each_item][1]), "Rank": count}
)
count = count + 1
scoring_csv = []
# Add the score of each trait to each of the tokens
for each_token in metadata_scoring:
this_row = []
for each_col in metadata_scoring[each_token]:
this_row.append(metadata_scoring[each_token][each_col])
scoring_csv.append(this_row)
sorted_scoring_csv = sorted(
scoring_csv, key=lambda item: item[len(item) - 1], reverse=False
)
# Create Header
header_row = []
for each_col in metadata_scoring["1"]:
header_row.append(each_col)
# Save to csv file
with open(metadata_scoring_csv_file_name, "w") as f:
write = csv.writer(f)
write.writerow(header_row)
write.writerows(sorted_scoring_csv)
save_raw_attributes_csv(
collection=project_name,
raw_attributtes=metadata_to_save,
file_path=metadata_attributes_csv_file_name,
)
if warning_flag:
print(
"============\n WARNING\n==============\nThe rarity data you are trying to extrat might contain Thematic Match / Matching Sets that this script ignored. \nSo while you compare with Rarity Tools data, make sure Thematic Sets is turned off.\n\n"
)
print("--- %s seconds Taken to Download ---" % (time.time() - start_time))
def save_raw_attributes_csv(collection, raw_attributtes, file_path):
import pandas as pd
# List to store all tokens traits
trait_data = []
for token in raw_attributtes:
# empty dict to store this token traits
token_raw = dict()
token_raw["TOKEN_ID"] = token
token_raw["TOKEN_NAME"] = f"{collection} #{str(token)}"
for trait in raw_attributtes[token]["nft_traits"]:
if trait["node"]["traitType"] != "Trait Count":
token_raw[trait["node"]["traitType"]] = trait["node"]["value"]
trait_data.append(token_raw)
# convert list to pandas dataframe and save to disk
raw_attributes_csv = | pd.DataFrame.from_records(trait_data) | pandas.DataFrame.from_records |
from nltk.corpus import stopwords
import string, re
from collections import Counter
import wordcloud
import seaborn as sns
import regex as re
import numpy as np # linear algebra
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import os
"""
refer: https://www.kaggle.com/serkanpeldek/text-classification-with-embedding-conv1d
Preprocessing steps applied:
lowercase
filter punctuation
filter non-alphabetic characters
limit text length
filter stopwords
filter short words (<=2)
filter low-frequency words (<=2)
...
"""
review_dataset_path = "../raw data/review_polarity/txt_sentoken"
# print(os.listdir(review_dataset_path))
# Positive and negative reviews folder paths
pos_review_folder_path = review_dataset_path + "/" + "pos"
neg_review_folder_path = review_dataset_path + "/" + "neg"
def load_text_from_textfile(path):
file = open(path, "r")
review = file.read()
file.close()
return review
def load_review_from_textfile(path):
return load_text_from_textfile(path)
def get_data_target(folder_path, file_names, review_type):
data = list()
target = list()
for file_name in file_names:
full_path = folder_path + "/" + file_name
review = load_review_from_textfile(path=full_path)
data.append(review)
target.append(review_type)
return data, target
# text processing
class MakeString:
def process(self, text):
return str(text)
class ReplaceBy:
def __init__(self, replace_by):
# replace_by is a tuple contains pairs of replace and by characters.
self.replace_by = replace_by
def process(self, text):
for replace, by in self.replace_by:
text = text.replace(replace, by)
return text
class LowerText:
def process(self, text):
return text.lower()
class ReduceTextLength:
def __init__(self, limited_text_length):
self.limited_text_length = limited_text_length
def process(self, text):
return text[:self.limited_text_length]
# word vector processing
class VectorizeText:
def __init__(self):
pass
def process(self, text):
return text.split()
class FilterPunctuation:
def __init__(self):
print("Punctuation Filter created...")
def process(self, words_vector):
reg_exp_filter_rule=re.compile("[%s]"%re.escape(string.punctuation))
words_vector=[reg_exp_filter_rule.sub("", word) for word in words_vector]
return words_vector
class FilterNonalpha:
def __init__(self):
print("Nonalpha Filter created...")
def process(self, words_vector):
words_vector=[word for word in words_vector if word.isalpha()]
return words_vector
class FilterStopWord:
def __init__(self, language):
self.language=language
print("Stopwords Filter created...")
def process(self, words_vector):
stop_words=set(stopwords.words(self.language))
words_vector=[word for word in words_vector if not word in stop_words]
return words_vector
class FilterShortWord:
def __init__(self, min_length):
self.min_length=min_length
print("Short Words Filter created...")
def process(self, words_vector):
words_vector=[word for word in words_vector if len(word)>=self.min_length]
return words_vector
# text processing
class TextProcessor:
def __init__(self, processor_list):
self.processor_list = processor_list
def process(self, text):
for processor in self.processor_list:
text = processor.process(text)
return text
class VocabularyHelper:
def __init__(self, textProcessor):
self.textProcessor=textProcessor
self.vocabulary=Counter()
def update(self, text):
words_vector=self.textProcessor.process(text=text)
self.vocabulary.update(words_vector)
def get_vocabulary(self):
return self.vocabulary
if __name__ == "__main__":
# Positive and negative file names
pos_review_file_names = os.listdir(pos_review_folder_path)
neg_review_file_names = os.listdir(neg_review_folder_path)
print(pos_review_file_names[0])
pos_data, pos_target = get_data_target(folder_path=pos_review_folder_path,
file_names=pos_review_file_names,
review_type="positive")
neg_data, neg_target = get_data_target(folder_path=neg_review_folder_path,
file_names=neg_review_file_names,
review_type="negative")
print("正样本数量:", len(pos_data))
print("负样本数量:", len(neg_data))
data = pos_data + neg_data
target_ = pos_target + neg_target
print("总共数据:", len(data))
# encode the labels as numbers: positive=1, negative=0
le = LabelEncoder()
le.fit(target_)
target = le.transform(target_)
print(data[0])
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, stratify=target, random_state=24)
print("训练数据数:", len(X_train))
print("训练标签数:", len(y_train))
print("测试样本数:", len(X_test))
print("测试标签数", len(y_test))
fig, axarr = plt.subplots(nrows=1, ncols=2, figsize=(8, 4), sharey=True)
axarr[0].set_title("Number of samples in train")
sns.countplot(x=y_train, ax=axarr[0])
axarr[1].set_title("Number of samples in test")
sns.countplot(x=y_test, ax=axarr[1])
plt.show()
text_len = np.vectorize(len) # vectorized length function
text_lengths = text_len(X_train) # [6975 4351 3120 ... 1869 4961 3808]
mean_review_length = int(text_lengths.mean())
print("平均评论长度:", mean_review_length)
print("最小评论长度:", text_lengths.min())
print("最大评论长度:", text_lengths.max())
# 绘制文本长度的分布
sns.distplot(a=text_lengths)
# str()
makeString = MakeString()
# 过滤标点符号
replace_by = [(".", " "), ("?", " "), (",", " "), ("!", " "), (":", " "), (";", " ")]
replaceBy = ReplaceBy(replace_by=replace_by)
# 小写
lowerText = LowerText()
# 限制文本长度
FACTOR = 8
reduceTextLength = ReduceTextLength(limited_text_length=mean_review_length * FACTOR)
    # Split on whitespace
vectorizeText = VectorizeText()
    # Strip punctuation characters from tokens
filterPunctuation = FilterPunctuation()
    # Drop non-alphabetic tokens
filterNonalpha = FilterNonalpha()
    # Remove stopwords
filterStopWord = FilterStopWord(language="english")
    # Remove short words
min_length = 2
filterShortWord = FilterShortWord(min_length=min_length)
    # Preprocessing pipeline
processor_list_1 = [makeString,
replaceBy,
lowerText,
reduceTextLength,
vectorizeText,
filterPunctuation,
filterNonalpha,
filterStopWord,
filterShortWord]
    # Text processor
textProcessor1 = TextProcessor(processor_list=processor_list_1)
    # Inspect a random training sample
random_number = np.random.randint(0, len(X_train))
print("Original Review:\n", X_train[random_number][:500])
print("=" * 100)
print("Processed Review:\n", textProcessor1.process(text=X_train[random_number][:500]))
    # Build the vocabulary
vocabularyHelper = VocabularyHelper(textProcessor=textProcessor1)
for text in X_train:
vocabularyHelper.update(text)
vocabulary = vocabularyHelper.get_vocabulary()
print("词汇表长度:", len(vocabulary))
# Counter({'film': 7098, 'movie': 4432, 'one': 4429, 'like': 2842,...})
    # Look at the 10 most and 10 least frequent words
n = 10
print("{} most frequented words in vocabulary:{}".format(n, vocabulary.most_common(n)))
print("{} least frequented words in vocabulary:{}".format(n, vocabulary.most_common()[:-n - 1:-1]))
    # Word cloud
vocabulary_list = " ".join([key for key, _ in vocabulary.most_common()])
plt.figure(figsize=(15, 35))
wordcloud_image = wordcloud.WordCloud(width=1000, height=1000,
background_color='white',
# stopwords = stopwords,
min_font_size=10).generate(vocabulary_list)
plt.xticks([])
plt.yticks([])
plt.imshow(wordcloud_image)
    # Filter out low-frequency words
min_occurence = 2
vocabulary = Counter({key: value for key, value in vocabulary.items() if value > min_occurence})
print("{} least frequented words in vocabulary:{}".format(n, vocabulary.most_common()[:-n - 1:-1]))
print("Length of vocabulary after removing words occurenced less than {} times:{}".format(min_occurence,
len(vocabulary)))
    # Filter out words not in the vocabulary
class FilterNotInVocabulary:
def __init__(self, vocabulary):
self.vocabulary = vocabulary
def process(self, words_vector):
words_vector = [word for word in words_vector if word in self.vocabulary]
return words_vector
    # Join the tokens back into a string
class JoinWithSpace:
def __init__(self):
pass
def process(self, words_vector):
return " ".join(words_vector)
filterNotInVocabulary = FilterNotInVocabulary(vocabulary=vocabulary)
joinWithSpace = JoinWithSpace()
processor_list_2 = [makeString,
replaceBy,
lowerText,
reduceTextLength,
vectorizeText,
filterPunctuation,
filterNonalpha,
filterStopWord,
filterShortWord,
filterNotInVocabulary,
joinWithSpace
]
textProcessor2 = TextProcessor(processor_list=processor_list_2)
review = X_train[np.random.randint(0, len(X_train))]
print("Original Text:\n", review[:500])
processed_review = textProcessor2.process(review[:500])
print("=" * 100)
print("Processed Text:\n", processed_review)
    # NOTE: textProcessor2 already includes every step of processor_list_1,
    # so each review only needs a single pass through it
    for i, review in enumerate(data):
        data[i] = textProcessor2.process(review)
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, stratify=target, random_state=24)
print("Dataset splited into train and test parts...")
print("train data length :", len(X_train))
print("train target length:", len(y_train))
print()
print("test data length :", len(X_test))
print("test target length:", len(y_test))
print(X_test[0])
# """去除符号"""
# re_punct = r"[\[\]\{\}\:\;\'\"\,\<\.\>\/\?\\\|\`\!\@\#\$\%\^\&\*\(\)\-\_\=\+]"
# for i,j in enumerate(X_train):
# X_train[i] = re.sub(re_punct, "", j)
# X_train[i] = X_train[i].replace('\n', '')
# for i,j in enumerate(X_test):
# X_test[i] = re.sub(re_punct, "", j)
# X_test[i] = X_test[i].replace('\n', '')
# import random
#
# X = list(zip(X_train,y_train))
# Y = list(zip(X_test,y_test))
#
# print(Y)
# random.shuffle(X)
# print(Y)
# random.shuffle(Y)
#
# X_train = [i for i,j in X]
# y_train = [j for i,j in X]
# X_test = [i for i,j in Y]
# y_test = [j for i,j in Y]
    # The dict keys become the column names in the CSV
dataframe = pd.DataFrame({'a_name': y_train[:1400], 'b_name': X_train[:1400]})
    # Save the DataFrame as CSV; index controls whether row labels are written (default True)
dataframe.to_csv("../base/data/movie/train.tsv", index=False, sep='\t', header=False)
    dataframe = pd.DataFrame({'a_name': y_train[1400:], 'b_name': X_train[1400:]})
import pandas as pd
import bioframe
import pyranges as pr
import numpy as np
from io import StringIO
def bioframe_to_pyranges(df):
pydf = df.copy()
pydf.rename(
{"chrom": "Chromosome", "start": "Start", "end": "End"},
axis="columns",
inplace=True,
)
return pr.PyRanges(pydf)
def pyranges_to_bioframe(pydf):
df = pydf.df
df.rename(
{"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
axis="columns",
inplace=True,
)
return df
def pyranges_overlap_to_bioframe(pydf):
## convert the df output by pyranges join into a bioframe-compatible format
df = pydf.df.copy()
df.rename(
{
"Chromosome": "chrom_1",
"Start": "start_1",
"End": "end_1",
"Start_b": "start_2",
"End_b": "end_2",
},
axis="columns",
inplace=True,
)
df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
df["chrom_2"] = df["chrom_1"].values
return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
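# Illustrative sketch (not part of the original test module): round-trip a small
# interval table through the converters defined above. PyRanges may reorder rows
# and change dtypes (categorical chromosomes, 32-bit coordinates), hence the
# sorting and check_dtype=False.
def _example_roundtrip():
    df = pd.DataFrame(
        [["chr12", 10, 20], ["chrX", 5, 15]],
        columns=["chrom", "start", "end"],
    )
    pydf = bioframe_to_pyranges(df)       # bioframe -> PyRanges
    df_back = pyranges_to_bioframe(pydf)  # PyRanges -> bioframe
    pd.testing.assert_frame_equal(
        df.sort_values(["chrom", "start"]).reset_index(drop=True),
        df_back[["chrom", "start", "end"]]
        .astype({"chrom": "object"})
        .sort_values(["chrom", "start"])
        .reset_index(drop=True),
        check_dtype=False,
    )
    return df_back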
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
    df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
        exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
# -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pytest
from pandas.compat import PY3, StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame
import pandas.util.testing as tm
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting='foo')
# quoting must in the range [0, 3]
msg = 'bad "quoting" value'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
        tm.assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
"""Test evaluator."""
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sktime.benchmarking.evaluation import Evaluator
from sktime.benchmarking.metrics import PairwiseMetric
from sktime.benchmarking.results import RAMResults
from sktime.series_as_features.model_selection import PresplitFilesCV
def dummy_results():
"""Results that are dummy."""
results = RAMResults()
results.cv = PresplitFilesCV()
results.save_predictions(
strategy_name="alg1",
dataset_name="dataset1",
index=np.array([1, 2, 3, 4]),
y_true=np.array([1, 1, 1, 1]),
y_pred=np.array([1, 1, 1, 1]),
y_proba=None,
cv_fold=0,
train_or_test="test",
        fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
# Third party imports
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from scipy.interpolate import interp1d
from scipy.integrate import odeint
# Local application imports
from modules.reaction_models import Model
from modules.gof import rSquared, MSE
def simpleLinearFit(time,k):
return k*time
def EER(conversion,time,yfit):
    # calculate the mean square error of the derivative of the experimental conversion
# EER : Explicit Euler eRror
# allocale residuals
residuals = np.zeros(len(conversion))
# in the bulk
for i in range(len(conversion)-1):
# experimental reaction rate
u = (conversion[i+1] - conversion[i])/( time[i+1] - time[i] )
# computed reaction rate
up = yfit[i]
# calculate residual
residuals[i] = u - up
# on the boundary
# experimental reaction rate
u = (conversion[-1] - conversion[-2])/( time[-1] - time[-2] )
# computed reaction rate
up = yfit[-1]
# calculate residual
residuals[-1] = u - up
# calculate sum of square residuals
ss_res = np.sum(residuals**2.0)
# return mean square error
return (1.0/len(conversion))*ss_res
def integralRateRegression(time,conversion,modelName):
# perform Non-Linear Regression
# fit the experimental integral rate conversion (g)
# calculate the Arrhenius rate constant (k)
# pick up the model
model = Model(modelName)
# define data
x = time
y = np.array([model.g(a) for a in conversion])
# fit integral rate
popt, pcov = curve_fit(simpleLinearFit,x,y)
# popt: optimal values for the parameters so that the sum of the squared residuals of f(xdata, *popt) - ydata is minimized.
k = popt[0] # Arrhenius rate constant
yfit = simpleLinearFit(time,k) # modeled integral reaction rate
return k, yfit
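# Illustrative sketch (not part of the original module): the linear fit that
# integralRateRegression performs, shown here without the local Model class.
# For a first-order mechanism the integral form is g(alpha) = -ln(1 - alpha),
# so g(alpha) against time is a straight line through the origin with slope k.
# (The first-order mechanism and k = 0.05 below are illustrative assumptions.)
def _example_integral_fit():
    k_true = 0.05
    t = np.linspace(0.0, 60.0, 50)
    alpha = 1.0 - np.exp(-k_true * t)           # synthetic noise-free conversion
    g = -np.log(1.0 - alpha)                    # first-order integral rate
    popt, _ = curve_fit(simpleLinearFit, t, g)  # fit g = k * t
    return popt[0]                              # ~0.05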
def conversionRegression(time,conversion,modelName):
# perform Non-Linear Regression
# fit the experimental conversion (conversion)
# calculate the Arrhenius rate constant (k)
# pick up the model
model = Model(modelName)
# define data
x = time
y = conversion
if modelName not in ['D2','D4']:
# take estimation from the integral rate regression
k_est, yfit = integralRateRegression(x,y,modelName)
# fit conversion
popt, pcov = curve_fit(model.alpha,x,y,p0=k_est) # p0 : initial guess
# popt: optimal values for the parameters so that the sum of the squared residuals of f(xdata, *popt) - ydata is minimized.
k = popt[0] # Arrhenius rate constant
yfit = np.array([model.alpha(t, k) for t in time]) # modeled conversion fraction
else:
# measure the mean square error on the linear integral rate
k, yfit = integralRateRegression(time,conversion,modelName)
return k, yfit
def differentialRateRegression(time,conversion,modelName):
# perform Non-Linear Regression
# fit the experimental differential rate conversion (f)
# calculate the Arrhenius rate constant (k)
# k_est: estimation for the Arrhenius constant
# ODE: da/dt = k f(a)
def RHS(t, k):
'Function that returns Ca computed from an ODE for a k'
def ODE(a, t):
return k * model.f(a)
u0 = conversion[0]
u_numerical = odeint(ODE, u0, t)
return u_numerical[:,0]
# pick up the model
model = Model(modelName)
# define data
x = time
y = conversion
# take estimation from the integral rate regression
k_est, yfit = integralRateRegression(x,y,modelName)
# fit ODE
popt, pcov = curve_fit(RHS, x, y, p0=k_est) # p0 : initial guess
# popt: optimal values for the parameters so that the sum of the squared residuals of f(xdata, *popt) - ydata is minimized.
k = popt[0] # Arrhenius rate constant from fitting
if modelName not in ['D2','D4']:
yfit = np.array([model.alpha(t, k) for t in time]) # modeled conversion fraction
else:
yfit = k*time # modeled integral rate
return k, yfit
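# Illustrative sketch (not part of the original module): the same first-order toy
# problem fitted through the differential route that differentialRateRegression
# takes, i.e. integrating da/dt = k * f(a) with odeint inside curve_fit. Here
# f(a) = 1 - a is hard-coded instead of coming from the local Model class.
def _example_differential_fit():
    k_true = 0.05
    t = np.linspace(0.0, 60.0, 50)
    alpha = 1.0 - np.exp(-k_true * t)
    def rhs(tt, k):
        return odeint(lambda a, _t: k * (1.0 - a), alpha[0], tt)[:, 0]
    popt, _ = curve_fit(rhs, t, alpha, p0=0.01)
    return popt[0]  # ~0.05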
def comprehensiveRegressor(time,conversion,models):
# arguments
# time (numpy array)
# conversion (numpy array)
# models (list of strings)
# returns
    # a DataFrame containing the fitting information
rate_constant_alpha = []
rate_constant_integral = []
rate_constant_differen = []
mse_coef_alpha = []
mse_coef_integral = []
mse_constant_differen = []
# loop over the models
    for modelIndx, modelName in enumerate(models):
        model = Model(modelName)
        # experimental integral rate, used as the reference for the integral fits
        g_exp = np.array([model.g(a) for a in conversion])
        # D2/D4 fits return the integral rate, everything else the conversion fraction
        target = g_exp if modelName in ['D2', 'D4'] else conversion
        # integral rate regression
        k_integral, yfit_integral = integralRateRegression(time, conversion, modelName)
        rate_constant_integral.append(k_integral)
        mse_coef_integral.append(mean_squared_error(g_exp, yfit_integral))
        # conversion regression
        k_alpha, yfit_alpha = conversionRegression(time, conversion, modelName)
        rate_constant_alpha.append(abs(k_alpha)) # abs() guards against RuntimeWarning: invalid value encountered in sqrt
        mse_coef_alpha.append(mean_squared_error(target, yfit_alpha))
        # differential rate regression
        k_differen, yfit_differen = differentialRateRegression(time, conversion, modelName)
        rate_constant_differen.append(abs(k_differen)) # abs() guards against RuntimeWarning: invalid value encountered in sqrt
        mse_constant_differen.append(mean_squared_error(target, yfit_differen))
# pass the data to a dictionary
data = {'model': models,
'rate_constant - alpha': rate_constant_alpha,
'rate_constant - integral': rate_constant_integral,
'rate_constant - differential': rate_constant_differen,
'MSE - alpha': mse_coef_alpha,
'MSE - integral': mse_coef_integral,
'MSE - differential': mse_constant_differen}
# dictionary to dataframe
    df = pd.DataFrame(data)
    return df
''' Essential packages '''
import os
import pickle as pkl
from datetime import datetime
import numpy as np
import pandas as pd
import pandas_datareader as pdr
class StockMarket:
def __init__(self, start_date=datetime(2010, 1, 1), end_date=datetime.now(), data_dir='data'):
self.start_date = start_date
self.end_date = end_date
self.price_data = dict()
self.data_dir = data_dir
def get_stock_price(self, symbols, log_return=True):
'''
args:
symbols: list of stock symbols, e.g. ['AAPL', 'AMZN', 'GOOGL']
log_return: boolean. log return
'''
# File name starts with dates
prefix = datetime.now().strftime('%Y%m%d')
done_list = [file.split('.')[0] for file in os.listdir(self.data_dir)]
# Load price data: if not exists, download with pdr.DataReader
for symbol in symbols:
if prefix+symbol in done_list:
with open(os.path.join(self.data_dir, prefix+symbol+'.pkl'), 'rb') as f:
price_data = pkl.load(f)
else:
price_data = pdr.DataReader(symbol, 'yahoo', start=datetime(2000,1,1), end=self.end_date)
with open(os.path.join(self.data_dir, prefix+symbol+'.pkl'), 'wb') as f:
pkl.dump(price_data, f)
price_data = price_data[self.start_date <= price_data.index]
price_data = price_data[price_data.index <= self.end_date]
self.price_data[symbol] = price_data
for symbol, df in self.price_data.items():
df.sort_index(ascending=False, inplace=True)
df['return'] = np.log(df['Adj Close'].shift(1) / df['Adj Close']) if log_return else df['Adj Close'].shift(1) / df['Adj Close']-1
self.price_data[symbol] = df.dropna() # remove rows with NaN
def get_stock_statistics(self):
'''
return:
df_return.columns: list of symbols
mean_return: array of size [len(self.price_data), 1]
covariance: array of size [len(self.price_data), len(self.price_data)]
'''
df_return = pd.DataFrame()
for symbol, df in self.price_data.items():
stock_return = df['return']
stock_return.name = symbol
            df_return = pd.concat([df_return, stock_return], axis=1)
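# --- Illustrative sketch, NOT the missing remainder of get_stock_statistics:
# --- per its docstring, the statistics can be derived from a returns table
# --- shaped like df_return (one column per symbol) roughly as follows.
def _example_return_statistics(df_return):
    mean_return = df_return.mean().values.reshape(-1, 1)  # [n_symbols, 1]
    covariance = df_return.cov().values                   # [n_symbols, n_symbols]
    return list(df_return.columns), mean_return, covariance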
from __future__ import print_function
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import shutil
import copy
from time import time
from datetime import timedelta
import h5py
tf.compat.v1.disable_eager_execution()
'''
CHRONOS: population modeling of CRISPR readcount data
<NAME> (<EMAIL>)
The Broad Institute
'''
def write_hdf5(df, filename):
if os.path.exists(filename):
os.remove(filename)
dest = h5py.File(filename, 'w')
try:
dim_0 = [x.encode('utf8') for x in df.index]
dim_1 = [x.encode('utf8') for x in df.columns]
dest_dim_0 = dest.create_dataset('dim_0', track_times=False, data=dim_0)
dest_dim_1 = dest.create_dataset('dim_1', track_times=False, data=dim_1)
dest.create_dataset("data", track_times=False, data=df.values)
finally:
dest.close()
def read_hdf5(filename):
src = h5py.File(filename, 'r')
try:
dim_0 = [x.decode('utf8') for x in src['dim_0']]
dim_1 = [x.decode('utf8') for x in src['dim_1']]
data = np.array(src['data'])
return pd.DataFrame(index=dim_0, columns=dim_1, data=data)
finally:
src.close()
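# Illustrative sketch (not part of the original module): round-trip a small
# matrix through write_hdf5/read_hdf5. Both helpers assume string row and column
# labels, since the labels are utf-8 encoded and decoded. Writes a small file
# in the working directory.
def _example_hdf5_roundtrip(path="example_matrix.hdf5"):
    df = pd.DataFrame(
        np.arange(6, dtype=float).reshape(2, 3),
        index=["rep1", "rep2"],
        columns=["sgRNA_a", "sgRNA_b", "sgRNA_c"],
    )
    write_hdf5(df, path)
    recovered = read_hdf5(path)
    assert (recovered.values == df.values).all()
    assert list(recovered.index) == list(df.index)
    return recovered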
def extract_last_reps(sequence_map):
'''get the sequence IDs of replicates at their last measured timepoint'''
rep_map = sequence_map[sequence_map.cell_line_name != 'pDNA']
last_days = rep_map.groupby('cell_line_name').days.max()
last_reps = rep_map[rep_map.days == last_days.loc[rep_map.cell_line_name].values].sequence_ID
return last_reps
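# Illustrative sketch (not part of the original module): extract_last_reps keeps
# only each cell line's latest-timepoint replicates and ignores pDNA rows.
def _example_extract_last_reps():
    seq_map = pd.DataFrame({
        "sequence_ID": ["pdna_1", "lineA_d7", "lineA_d14", "lineB_d21"],
        "cell_line_name": ["pDNA", "lineA", "lineA", "lineB"],
        "days": [0, 7, 14, 21],
        "pDNA_batch": ["batch1"] * 4,
    })
    last = extract_last_reps(seq_map)
    assert sorted(last) == ["lineA_d14", "lineB_d21"]
    return last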
def check_inputs(readcounts=None, guide_gene_map=None, sequence_map=None):
keys = None
sequence_expected = set(['sequence_ID', 'cell_line_name', 'days', 'pDNA_batch'])
guide_expected = set(['sgrna', 'gene'])
for name, entry in zip(['readcounts', 'guide_gene_map', 'sequence_map'], [readcounts, guide_gene_map, sequence_map]):
if entry is None:
continue
if not isinstance(entry, dict):
raise ValueError("Expected dict, but received %r" %entry)
if keys is None:
keys = set(entry.keys())
else:
if not set(entry.keys()) == keys:
raise ValueError("The keys for %s (%r) do not match the other keys found (%r)" % (name, keys, set(entry.keys)))
for key, val in entry.items():
if not isinstance(val, pd.DataFrame):
raise ValueError('expected Pandas dataframe for %s[%r]' %(name, key))
if name == 'readcounts':
assert val.index.duplicated().sum() == 0, "duplicated sequence IDs for readcounts %r" %key
assert not val.isnull().all(axis=1).any(), \
"All readcounts are null for one or more replicates in %s, please drop them" % key
assert not val.isnull().all(axis=0).any(),\
"All readcounts are null for one or more guides in %s, please drop them" % key
elif name == 'guide_gene_map':
assert not guide_expected - set(val.columns), \
"not all expected columns %r found for guide-gene map for %s. Found %r" %(guide_expected, key, val.columns)
assert val.sgrna.duplicated().sum() == 0, "duplicated sgRNAs for guide-gene map %r. Multiple gene alignments for sgRNAs are not supported." %key
elif name == 'sequence_map':
assert not sequence_expected - set(val.columns), \
"not all expected columns %r found for sequence map for %s. Found %r" %(sequence_expected, key, val.columns)
assert val.sequence_ID.duplicated().sum() == 0, "duplicated sequence IDs for sequence map %r" %key
for batch in val.query('cell_line_name != "pDNA"').pDNA_batch.unique():
assert batch in val.query('cell_line_name == "pDNA"').pDNA_batch.values, \
"there are sequences with pDNA batch %s in library %s, but no pDNA measurements for that batch" %(batch, key)
if val.days.max() > 50:
print("\t\t\tWARNING: many days (%1.2f) found for %s.\n\t\t\tThis may cause numerical issues in fitting the model.\n\
Consider rescaling all days by a constant factor so the max is less than 50." % (val.days.max(), key))
for key in keys:
if not readcounts is None and not sequence_map is None:
assert not set(readcounts[key].index) ^ set(sequence_map[key].sequence_ID), \
"\t\t\t mismatched sequence IDs between readcounts and sequence map for %r.\n\
Chronos expects `readcounts` to have guides as columns, sequence IDs as rows.\n\
Is your data transposed?" %key
if not readcounts is None and not guide_gene_map is None:
assert not set(readcounts[key].columns) ^ set(guide_gene_map[key].sgrna), \
"mismatched map keys between readcounts and guide map for %s" % key
def filter_guides(guide_gene_map, max_guides=15):
'''
removes sgRNAs that target multiple genes, then genes that have less than two guides.
Parameters:
`guide_gene_map` (`pandas.DataFrame`): See Model.__init__ for formatting of guide_gene_map
Returns:
`pandas.DataFrame`: filtered guide_gene_map
'''
alignment_counts = guide_gene_map.groupby("sgrna").gene.count()
guide_gene_map = guide_gene_map[guide_gene_map['sgrna'].isin(alignment_counts.loc[lambda x: x == 1].index)]
guide_counts = guide_gene_map.groupby('gene')['sgrna'].count()
guide_gene_map = guide_gene_map[guide_gene_map.gene.isin(guide_counts.loc[lambda x: (x > 1)& (x <= max_guides)].index)]
return guide_gene_map
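# Illustrative sketch (not part of the original module): filter_guides drops
# multi-aligning sgRNAs first, then genes left with fewer than two guides.
def _example_filter_guides():
    gmap = pd.DataFrame({
        "sgrna": ["g1", "g2", "g3", "g3", "g4"],
        "gene":  ["A",  "A",  "B",  "C",  "B"],
    })
    filtered = filter_guides(gmap)
    # g3 aligns to both B and C, so it is dropped; gene B is then left with only
    # g4 and is dropped too; gene A keeps its two uniquely-aligning guides.
    assert sorted(filtered.sgrna) == ["g1", "g2"]
    return filtered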
def calculate_fold_change(readcounts, sequence_map):
'''
Calculates fold change as the ratio of the RPM+1 of late time points to pDNA
Parameters:
readcounts (`pandas.DataFrame`): readcount matrix with replicates on rows, guides on columns
sequence_map (`pandas.DataFrame`): has string columns "sequence_ID", "cell_line_name", and "pDNA_batch"
returns:
fold_change (`pd.DataFrame`)
'''
check_inputs(readcounts={'default': readcounts}, sequence_map={'default': sequence_map})
reps = sequence_map.query('cell_line_name != "pDNA"').sequence_ID
pdna = sequence_map.query('cell_line_name == "pDNA"').sequence_ID
rpm = pd.DataFrame(
(1e6 * readcounts.values.T / readcounts.sum(axis=1).values + 1).T,
index=readcounts.index, columns=readcounts.columns
)
fc = rpm.loc[reps]
norm = rpm.loc[pdna].groupby(sequence_map.set_index('sequence_ID')['pDNA_batch']).median()
try:
fc = pd.DataFrame(fc.values/norm.loc[sequence_map.set_index('sequence_ID').loc[reps, 'pDNA_batch']].values,
index=fc.index, columns=fc.columns
)
except Exception as e:
print(fc.iloc[:3, :3],'\n')
print(norm[:3], '\n')
print(reps[:3], '\n')
print(sequence_map[:3], '\n')
raise e
errors = []
# if np.sum(fc.values <= 0) > 0:
# errors.append("Fold change has zero or negative values:\n%r\n" % fc[fc <= 0].stack()[:10])
# if (fc.min(axis=1) >= 1).any():
# errors.append("Fold change has no values less than 1 for replicates\n%r" % fc.min(axis=1).loc[lambda x: x>= 1])
if errors:
raise RuntimeError('\n'.join(errors))
return fc
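# Illustrative sketch (not part of the original module): fold change on a toy
# two-guide screen. Reads are converted to RPM + 1 and each late replicate is
# divided by the median RPM + 1 of the pDNA measurements in its pDNA batch.
def _example_fold_change():
    readcounts = pd.DataFrame(
        [[500000, 500000],    # pDNA: both guides start out equally abundant
         [250000, 750000]],   # late replicate: guide_1 depleted, guide_2 enriched
        index=["pdna_1", "lineA_rep1"],
        columns=["guide_1", "guide_2"],
    )
    seq_map = pd.DataFrame({
        "sequence_ID": ["pdna_1", "lineA_rep1"],
        "cell_line_name": ["pDNA", "lineA"],
        "days": [0, 14],
        "pDNA_batch": ["batch1", "batch1"],
    })
    fc = calculate_fold_change(readcounts, seq_map)
    # roughly [[0.5, 1.5]]: guide_1 dropped to half its pDNA share
    assert abs(fc.loc["lineA_rep1", "guide_1"] - 0.5) < 0.01
    return fc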
def nan_outgrowths(readcounts, sequence_map, guide_gene_map, absolute_cutoff=2, gap_cutoff=2):
'''
NaNs readcounts in cases where all of the following are true:
- The value for the guide/replicate pair corresponds to the most positive log fold change of all guides and all replicates for a cell line
- The logfold change for the guide/replicate pair is greater than `absolute_cutoff`
- The difference between the lfc for this pair and the next most positive pair for that gene and cell line is greater than gap_cutoff
Readcounts are mutated in place.
Parameters:
readcounts (`pandas.DataFrame`): readcount matrix with replicates on rows, guides on columns
sequence_map (`pandas.DataFrame`): has string columns "sequence_ID", "cell_line_name", and "pDNA_batch"
guide_gene_map (`pandas.DataFrame`): has string columns "sequence_ID", "cell_line_name", and "pDNA_batch"
'''
check_inputs(readcounts={'default': readcounts}, sequence_map={'default': sequence_map},
guide_gene_map={'default': guide_gene_map})
print('calculating LFC')
lfc = np.log2(calculate_fold_change(readcounts, sequence_map))
print('finding maximum LFC cells')
ggtemp = guide_gene_map.set_index('sgrna').gene.sort_index()
sqtemp = sequence_map.set_index('sequence_ID').cell_line_name.sort_index()
max_lfc = lfc.groupby(ggtemp, axis=1).max()
potential_cols = max_lfc.columns[max_lfc.max() > absolute_cutoff]
potential_rows= max_lfc.index[max_lfc.max(axis=1) > absolute_cutoff]
max_lfc = max_lfc.loc[potential_rows, potential_cols]
ggtemp = ggtemp[ggtemp.isin(potential_cols)]
sqtemp = sqtemp[sqtemp.isin(potential_rows)]
ggreversed = pd.Series(ggtemp.index.values, index=ggtemp.values).sort_index()
sqreversed = pd.Series(sqtemp.index.values, index=sqtemp.values).sort_index()
def second_highest(x):
if len(x) == 1:
return -np.inf
return x.values[np.argpartition(-x.values, 1)[1]]
max_row_2nd_column = lfc.T.groupby(ggtemp, axis=0).agg(second_highest).T
# print('constructing second of two second-highest matrices')
# max_col_2nd_row = lfc.groupby(ggtemp, axis=1).max()\
# .groupby(sqtemp, axis=0).agg(second_highest)
second_highest = max_row_2nd_column.loc[max_lfc.index, max_lfc.columns].values
# max_col_2nd_row.loc[max_lfc.index, max_lfc.columns].values
# )
gap = pd.DataFrame(max_lfc.values - second_highest, #second_highest
index=max_lfc.index, columns=max_lfc.columns)
print('finding sequences and guides with outgrowth')
cases = max_lfc[(max_lfc > absolute_cutoff) & (gap > gap_cutoff)]
cases = cases.stack()
print('%i (%1.5f%% of) readcounts to be removed' % (
len(cases),
100*len(cases)/np.product(readcounts.shape)
))
print(cases[:10])
problems = pd.Series()
for ind in cases.index:
block = lfc.loc[ind[0], ggreversed.loc[[ind[1]]]]
stacked = block[block == cases.loc[ind]]
guide = stacked.index[0]
problems.loc['%s&%s' % ind] = (ind[0], guide)
print('NaNing bad outgrowths')
for rep, guide in problems.values:
readcounts.loc[rep, guide] = np.nan
##################################################################
# M O D E L #
##################################################################
class Chronos(object):
'''
Model class for inferring effect of gene knockout from readcount data. Takes in readcounts, mapping dataframes, and hyperparameters at init,
then is trained with `train`.
Note on axes:
Replicates and cell lines are always the rows/major axis of all dataframes and tensors. Guides and genes are always the columns/second axis.
In cases where values vary per library, the object is a dict, and the library name is the key.
Notes on attribute names:
Attributes with single preceding underscores are tensorflow constants or tensorflow nodes, in analogy
with the idea of "private" attributes not meant to be interacted with directly. For tensorflow nodes,
there is usually a defined class attribute with no underscore which runs the node and returns
a pandas Series or DataFrame or dict of the same.
In other words `Chronos.v_a` (tensor) --(tensorflow function)-> `Chronos._a` (tensor) --(session run)-> `Chronos.a` (pandas object)
Some intermediate tensorflow nodes do not have corresponding numpy/pandas attributes.
Most parameters with a pandas interface can be set using the pandas interface. Do NOT try set tensorflow tensors directly - there
are usually transformations Chronos expects, such as rescaling time values. Use the pandas interface, i.e.
my_chronos_model.gene_effect = my_pandas_dataframe.
Every set of parameters that are fit per-library are dicts. If `Chronos.v_a` is a dict, the subsequent attributes in the graph are also dicts.
Settable Attributes: these CAN be set manually to interrogate the model or for other advanced uses, but NOT RECOMMENDED. Most users
will just want to read them out after training.
guide_efficacy (`pandas.Series`): estimated on-target KO efficacy of reagents, between 0 and 1
cell_efficacy (`dict` of `pandas.Series`): estimated cell line KO efficacy per library, between 0 and 1
growth_rate (`dict` of `pandas.Series`): relative growth rate of cell lines, positive float. 1 is the average of all lines in lbrary.
gene_effect ('pandas.DataFrame'): cell line by gene matrix of inferred change in growth rate caused by gene knockout
screen_delay (`pandas.Series`): per gene delay between infection and appearance of growth rate phenotype
        initial_offset (`dict` of 'pandas.Series'): per sgrna estimated log fold pDNA error, per library. This value is exponentiated and mean-centered,
then multiplied by the measured pDNA to infer the actual pDNA RPM of each guide.
If there are fewer than 2 late time points, the mean of this value per gene is 0.
days (`dict` of `pandas.Series`): number of days in culture for each replicate.
learning_rate (`float`): current model learning rate. Will be overwritten when `train` is called.
Unsettable (Calculated) Attributes:
cost (`float`): the NB2 negative log-likelihood of the data under the current model, shifted to be 0 when the output RPM
perfectly matches the input RPM. Does not include regularization or terms involving only constants.
cost_presum (`dict` of `pd.DataFrame`): the per-library, per-replicate, per-guide contribution to the cost.
out (`dict` of `pd.DataFrame`): the per-library, per-replicate, per-guide model estimate of reads, unnormalized.
output_norm (`dict` of `pandas.DataFrame`): `out` normalized so the sum of reads for each replicate is 1.
efficacy (`pandas.DataFrame`): cell by guide efficacy matrix generated from the outer product of cell and guide efficacies
initial (`dict` of `pandas.DataFrame`): estimated initial abundance of guides
rpm (`dict` of `pandas.DataFrame`): the RPM of the measured readcounts / 1 million. Effectively a constant.
'''
default_timepoint_scale = .1 * np.log(2)
default_cost_value = 0.67
persistent_handles = set([])
def __init__(self,
readcounts,
#copy_number_matrix,
guide_gene_map,
sequence_map,
gene_effect_hierarchical=.1,
gene_effect_smoothing=.25,
kernel_width=5,
gene_effect_L1=0.1,
gene_effect_L2=0,
excess_variance=0.05,
guide_efficacy_reg=.5,
offset_reg=1,
growth_rate_reg=0.01,
smart_init=True,
cell_efficacy_guide_quantile=0.01,
initial_screen_delay=3,
scale_cost=0.67,
max_learning_rate=.02,
dtype=tf.double,
verify_integrity=True,
log_dir=None,
):
'''
Parameters:
readcounts (`dict` of `pandas.DataFrame`): Values are matrices with sequenced entities on rows,
guides as column headers, and total readcounts for the guide in the replicate as entries. There should be at least one key
for each library, but the user can also make separate individual datasets according to some other condition,
such as screening site.
sequence_map (`dict` of `pandas.DataFrame`): Keys must match the keys of readcounts. Values are tables with the columns:
sequence_ID: matches a row index in the corresponding readcounts matrix. Should uniquely identify a combination of
cell line, replicate, and sequence passage.
                cell_line_name: name of corresponding cell line. 'pDNA' if this is a plasmid DNA or initial count measurement.
days: estimate number of cell days from infection when readcounts were performed. Plasmid DNA entries should be 0.
pDNA_batch: Unique identifier for associating readcounts to time 0 readcounts.
guide_gene_map (`dict` of `pandas.DataFrame`): Values are tables with the columns:
sgrna: guide sequence or unique guide identifier
gene: gene mapped to by guide. Genes should follow consistent naming conventions between libraries
gene_effect_hierarchical (`float`): regularization of individual gene effect scores towards the mean across cell lines
gene_effect_smoothing (`float`): regularization of individual gene scores towards mean after Gaussian kernel convolution
kernel_width (`float`): width (SD) of the Gaussian kernel for the smoothing regularization
gene_effect_L1 (`float`): regularization of gene effect CELL LINE MEAN towards zero with L1 penalty
gene_effect_L2 (`float`): regularization of individual gene scores towards zero with L2 penalty
offset_reg (`float`): regularization of pDNA error
growth_rate_reg (`float`): regularization of the negative log of the relative growth rate
guide_efficacy_reg (`float`): regularization of the gap between the two strongest guides' efficacy per gene,
or of the gap between them and 1 if only one late timepoint is present in readcounts for that library
excess_variance (`float` or `dict`): measure of Negative Binomial overdispersion for the cost function,
overall or per cell line and library.
max_learning_rate (`float`): passed to AdamOptimizer after initial burn-in period during training
verify_integrity (`bool`): whether to check each intermediate tensor computed by Chronos for inappropriate values
log_dir (`path` or None): if provided, location where Tensorboard snapshots will be saved
smart_init (`bool`): whether to initialize cell efficacies using the fold change of the most depleted guides
at the last timepoint
cell_efficacy_guide_quantile (`float`): quantile of guides to use to estimate cell screen efficacy. Between 0 and 0.5.
initial_screen_delay (`float`): how long after infection before growth phenotype kicks in, in days. If there are fewer than
3 late timepoints this initial value will be left unchanged.
dtype (`tensorflow.double` or `tensorflow.float32`): numerical precision of the computation. Strongly recommended to leave this unchanged.
scale_cost (`float`, `bool`, or `None`): the likelihood cost will be scaled so that it is initially this value (default 0.67) for all data.
This encourages more consistent behavior across datasets when leaving the other regularization hyperparameters
constant. Pass 0, False, or None to avoid cost scaling.
'''
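# Illustrative input sketch (added for clarity; not part of the original code). The
# library key 'libA' and all guide/gene/cell line names below are hypothetical and
# only show the shapes and columns the constructor expects:
# >>> readcounts = {'libA': pd.DataFrame(
# ... [[500, 450, 520], [300, 40, 510]],
# ... index=['pdna_batch1_seq', 'line1_rep1_seq'],
# ... columns=['guideA', 'guideB', 'guideC'])}
# >>> sequence_map = {'libA': pd.DataFrame({
# ... 'sequence_ID': ['pdna_batch1_seq', 'line1_rep1_seq'],
# ... 'cell_line_name': ['pDNA', 'line1'],
# ... 'days': [0, 21],
# ... 'pDNA_batch': ['batch1', 'batch1']})}
# >>> guide_gene_map = {'libA': pd.DataFrame({
# ... 'sgrna': ['guideA', 'guideB', 'guideC'],
# ... 'gene': ['GENE1', 'GENE1', 'GENE2']})}
# >>> model = Chronos(readcounts=readcounts, guide_gene_map=guide_gene_map, sequence_map=sequence_map)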
########################### I N I T I A L C H E C K S ############################
check_inputs(readcounts=readcounts, sequence_map=sequence_map, guide_gene_map=guide_gene_map)
sequence_map = self._make_pdna_unique(sequence_map, readcounts)
excess_variance = self._check_excess_variance(excess_variance, readcounts, sequence_map)
self.np_dtype = {tf.double: np.float64, tf.float32: np.float32}[dtype]
self.keys = list(readcounts.keys())
if scale_cost:
try:
scale_cost = float(scale_cost)
assert 0 < scale_cost, "scale_cost must be positive"
except:
raise ValueError("scale_cost must be None, False, or a positive number")
#################### C R E A T E M A P P I N G S ########################
(self.guides, self.genes, self.all_guides, self.all_genes,
self.guide_map, self.column_map
) = self._get_column_attributes(readcounts, guide_gene_map)
(self.sequences, self.pDNA_unique, self.cells, self.all_sequences, \
self.all_cells, self.cell_indices, self.replicate_map, self.index_map,
self.line_index_map, self.batch_map
) = self._get_row_attributes(readcounts, sequence_map)
################## A S S I G N C O N S T A N T S #######################
print('\n\nassigning float constants')
self.guide_efficacy_reg = float(guide_efficacy_reg)
self.gene_effect_L1 = float(gene_effect_L1)
self.gene_effect_L2 = float(gene_effect_L2)
self.gene_effect_hierarchical = float(gene_effect_hierarchical)
self.growth_rate_reg = float(growth_rate_reg)
self.offset_reg = float(offset_reg)
self.gene_effect_smoothing = float(gene_effect_smoothing)
self.kernel_width = float(kernel_width)
self.cell_efficacy_guide_quantile = float(cell_efficacy_guide_quantile)
if not 0 < self.cell_efficacy_guide_quantile < .5:
raise ValueError("cell_efficacy_guide_quantile should be greater than 0 and less than 0.5")
self.nguides, self.ngenes, self.nlines, self.nsequences = (
len(self.all_guides), len(self.all_genes), len(self.all_cells), len(self.all_sequences)
)
self._excess_variance = self._get_excess_variance_tf(excess_variance)
self.median_timepoint_counts = self._summarize_timepoint(sequence_map, np.median)
self._initialize_graph(max_learning_rate, dtype)
self._gene_effect_mask, self.mask_count = self._get_gene_effect_mask(dtype)
self._days = self._get_days(sequence_map, dtype)
self._rpm, self._mask = self._get_late_tf_timepoints(readcounts, dtype)
self._measured_initial = self._get_tf_measured_initial(readcounts, sequence_map, dtype)
################## C R E A T E V A R I A B L E S #######################
print('\n\nBuilding variables')
(self.v_initial, self._initial_core,
self._initial, self._initial_offset, self._grouped_initial_offset) = self._get_initial_tf_variables(dtype)
(self.v_guide_efficacy, self._guide_efficacy) = self._get_tf_guide_efficacy(dtype)
(self.v_growth_rate, self._growth_rate, self._line_presence_boolean) = self._get_tf_growth_rate(dtype)
(self.v_cell_efficacy, self._cell_efficacy) = self._get_tf_cell_efficacy(dtype)
(self.v_screen_delay, self._screen_delay) = self._get_tf_screen_delay(initial_screen_delay, dtype)
(self.v_mean_effect, self.v_residue, self._residue, self._true_residue, self._combined_gene_effect
) = self._get_tf_gene_effect(dtype)
############################# C O R E M O D E L ##############################
print("\n\nConnecting graph nodes in model")
self._effective_days = self._get_effect_days(self._screen_delay, self._days)
self._gene_effect_growth = self._get_gene_effect_growth(self._combined_gene_effect, self._growth_rate)
self._efficacy, self._selected_efficacies = self._get_combined_efficacy(self._cell_efficacy,self. _guide_efficacy)
self._growth, self._change = self._get_growth_and_fold_change(self._gene_effect_growth, self._effective_days,
self._selected_efficacies)
self._out, self._output_norm = self._get_abundance_estimates(self._initial, self._change)
##################################### C O S T #########################################
print("\n\nBuilding all costs")
self._total_guide_reg_cost = self._get_guide_regularization(self._guide_efficacy, dtype)
self._smoothed_presum = self._get_smoothed_ge_regularization(self.v_mean_effect, self._true_residue, kernel_width, dtype)
self._initial_cost = self._get_initial_regularization(self._initial_offset)
self._cost_presum, self._cost, self._scale = self._get_nb2_cost(self._excess_variance, self._output_norm, self._rpm, self._mask,
dtype)
self.run_dict.update({self._scale: 1.0})
self._full_cost = self._get_full_cost(dtype)
######################### F I N A L I Z I N G ###################################
print('\nCreating optimizer')
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self._learning_rate)
default_var_list = [
self.v_mean_effect,
self.v_residue,
self.v_guide_efficacy,
self.v_initial,
self.v_growth_rate
]
# if all([val > 2 for val in self.median_timepoint_counts.values()]):
# "All libraries have sufficient timepoints to estimate screen_delay, adding to estimate"
# default_var_list.append(self.v_screen_delay)
self._ge_only_step = self.optimizer.minimize(self._full_cost, var_list=[self.v_mean_effect, self.v_residue])
self._step = self.optimizer.minimize(self._full_cost, var_list=default_var_list)
self._merged = tf.compat.v1.summary.merge_all()
if log_dir is not None:
print("\tcreating log at %s" %log_dir)
if os.path.isdir(log_dir):
shutil.rmtree(log_dir)
os.mkdir(log_dir)
self.log_dir = log_dir
self.writer = tf.compat.v1.summary.FileWriter(log_dir, self.sess.graph)
init_op = tf.compat.v1.global_variables_initializer()
print('initializing variables')
self.sess.run(init_op)
if scale_cost:
denom = self.cost
self.run_dict.update({self._scale: scale_cost/denom})
if smart_init:
print("estimating initial screen efficacy")
self.smart_initialize(readcounts, sequence_map, cell_efficacy_guide_quantile)
if verify_integrity:
print("\tverifying graph integrity")
self.nan_check()
self.epoch = 0
print('ready to train')
################################################################################################
############## I N I T I A L I Z A T I O N M E T H O D S #########################
################################################################################################
def get_persistent_input(self, dtype, data, name=''):
placeholder = tf.compat.v1.placeholder(dtype=dtype, shape=data.shape)
# Persistent tensor to hold the data in tensorflow. Helpful because TF doesn't allow
# graph definitions larger than 2GB (so can't use constants), and passing the feed dict each time is slow.
# This feature is poorly documented, but the handle seems to refer not to a tensor but rather a tensor "state" -
# the state of a placeholder that's been passed the feed dict. This is what persists. Annoyingly, it then becomes
# impossible to track the shape of the tensor.
state_handle = self.sess.run(tf.compat.v1.get_session_handle(placeholder), {placeholder: data})
# why TF's persistence requires two handles, I don't know. But it does.
tensor_handle, data = tf.compat.v1.get_session_tensor(state_handle.handle, dtype=dtype, name=name)
self.run_dict[tensor_handle] = state_handle.handle
self.persistent_handles.add(state_handle.handle)
return data
########################### I N I T I A L C H E C K S ############################
def _make_pdna_unique(self, sequence_map, readcounts):
#guarantee unique pDNA batches
sequence_map = {key: val.query('sequence_ID in %r' % list(readcounts[key].index)) for key, val in sequence_map.items()}
for key, val in sequence_map.items():
val['pDNA_batch'] = val['pDNA_batch'].apply(lambda s: '%s_%s' % (key, s))
return sequence_map
def _check_excess_variance(self, excess_variance, readcounts, sequence_map):
if not isinstance(excess_variance, dict):
try:
excess_variance = float(excess_variance)
except ValueError:
raise ValueError("if provided, excess_variance must be a dict of pd.Series per library or a float")
else:
for key, val in excess_variance.items():
assert key in readcounts, "excess_variance key %s not found in the rest of the data" % key
assert isinstance(val, pd.Series), "the excess_variance values provided for the different datasets must be pandas.Series objects, not\n%r" % val
diff = set(val.index) ^ set(sequence_map[key].cell_line_name)
assert len(diff) < 2, "difference between index values\n%r\nfor excess_variance and cell lines found in %s" % (diff, key)
return excess_variance
#################### C R E A T E M A P P I N G S ########################
def make_map(melted_map, outer_list, inner_list, dtype=np.float64):
'''
Takes a sorted outer list (indices), a sorted inner list (targets), and a pd.Series that maps between them, and recomputes the mapping
as two arrays of integer positions suitable for gather function calls.
The mapping may cover only a subset of either the outer or inner list and vice versa.
The mapping's indices must be unique.
'''
melted_map = melted_map[melted_map.index.isin(outer_list) & melted_map.isin(inner_list)]
outer_array = np.array(outer_list)
gather_outer = np.searchsorted(outer_array, melted_map.index).astype(int)
inner_array = np.array(inner_list)
gather_inner = np.searchsorted(inner_array, melted_map.values).astype(int)
args = {
'gather_ind_inner': gather_inner,
'gather_ind_outer': gather_outer}
return args
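# Illustrative sketch (assumed toy values, not from the original source): for
# melted_map = pd.Series({'guideA': 'GENE1', 'guideB': 'GENE1', 'guideC': 'GENE2'}),
# outer_list = ['guideA', 'guideB', 'guideC'] and inner_list = ['GENE1', 'GENE2'],
# make_map returns {'gather_ind_inner': [0, 0, 1], 'gather_ind_outer': [0, 1, 2]}:
# the position of each guide in outer_list and of its gene in inner_list, ready to
# be used as indices for tf.gather.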
def _get_column_attributes(self, readcounts, guide_gene_map):
print('\n\nFinding all unique guides and genes')
#guarantees the same sequence of guides and genes within each library
guides = {key: val.columns for key, val in readcounts.items()}
genes = {key: val.set_index('sgrna').loc[guides[key], 'gene'] for key, val in guide_gene_map.items()}
all_guides = sorted(set.union(*[set(v) for v in guides.values()]))
all_genes = sorted(set.union(*[set(v.values) for v in genes.values()]))
for key in self.keys:
print("found %i unique guides and %i unique genes in %s" %(
len(set(guides[key])), len(set(genes[key])), key
))
print("found %i unique guides and %i unique genes overall" %(len(all_guides), len(all_genes)))
print('\nfinding guide-gene mapping indices')
guide_map = {key:
Chronos.make_map(guide_gene_map[key][['sgrna', 'gene']].set_index('sgrna').iloc[:, 0],
all_guides, all_genes, self.np_dtype)
for key in self.keys}
column_map = {key: np.array(all_guides)[guide_map[key]['gather_ind_outer']]
for key in self.keys}
return guides, genes, all_guides, all_genes, guide_map, column_map
def _get_row_attributes(self, readcounts, sequence_map):
print('\nfinding all unique sequenced replicates, cell lines, and pDNA batches')
#guarantees the same sequence of sequence_IDs and cell lines within each library.
sequences = {key: val[val.cell_line_name != 'pDNA'].sequence_ID for key, val in sequence_map.items()}
pDNA_batches = {key: list(val[val.cell_line_name != 'pDNA'].pDNA_batch.values)
for key, val in sequence_map.items()}
pDNA_unique = {key: sorted(set(val)) for key, val in pDNA_batches.items()}
cells = {key: val[val.cell_line_name != 'pDNA']['cell_line_name'].unique() for key, val in sequence_map.items()}
all_sequences = sorted(set.union(*tuple([set(v.values) for v in sequences.values()])))
all_cells = sorted(set.union(*tuple([set(v) for v in cells.values()])))
#This is necessary to consume copy number provided for only the cell-guide blocks present in each library
cell_indices = {key: [all_cells.index(s) for s in v]
for key, v in cells.items()}
assert len(all_sequences) == sum([len(val) for val in sequences.values()]
), "sequence IDs must be unique among all datasets"
for key in self.keys:
print("found %i unique sequences (excluding pDNA) and %i unique cell lines in %s" %(
len(set(sequences[key])), len(set(cells[key])), key
))
print("found %i unique replicates and %i unique cell lines overall" %(len(all_sequences), len(all_cells)))
print('\nfinding replicate-cell line mapping indices')
replicate_map = {key:
Chronos.make_map(sequence_map[key][['sequence_ID', 'cell_line_name']].set_index('sequence_ID').iloc[:, 0],
all_sequences, all_cells, self.np_dtype)
for key in self.keys}
index_map = {key: np.array(all_sequences)[replicate_map[key]['gather_ind_outer']]
for key in self.keys}
line_index_map = {key: np.array(all_cells)[replicate_map[key]['gather_ind_inner']]
for key in self.keys}
print('\nfinding replicate-pDNA mapping indices')
batch_map = {key:
Chronos.make_map(sequence_map[key][['sequence_ID', 'pDNA_batch']].set_index('sequence_ID').iloc[:, 0],
all_sequences, pDNA_unique[key], self.np_dtype)
for key in self.keys}
return sequences, pDNA_unique, cells, all_sequences, all_cells, cell_indices, replicate_map, index_map, line_index_map, batch_map
################## A S S I G N C O N S T A N T S #######################
def _get_excess_variance_tf(self, excess_variance):
_excess_variance = {}
for key in self.keys:
try:
_excess_variance[key] = tf.constant(excess_variance[key][self.line_index_map[key]].values.reshape((-1, 1)))
except IndexError:
raise IndexError("difference between index values for excess_variance and cell lines found in %s" % key)
except TypeError:
_excess_variance[key] = tf.constant(excess_variance * np.ones(shape=(len(self.line_index_map[key]), 1)))
return _excess_variance
def _summarize_timepoint(self, sequence_map, func):
out = {}
for key, val in sequence_map.items():
out[key] = func(val.groupby("cell_line_name").days.agg(lambda v: len(v.unique())).drop('pDNA').values)
return out
def _initialize_graph(self, max_learning_rate, dtype):
print('initializing graph')
self.sess = tf.compat.v1.Session()
self._learning_rate = tf.compat.v1.placeholder(shape=tuple(), dtype=dtype)
self.run_dict = {self._learning_rate: max_learning_rate}
self.max_learning_rate = max_learning_rate
self.persistent_handles = set([])
def _get_gene_effect_mask(self, dtype):
# excludes genes in a cell line with reads from only one library
print('\nbuilding gene effect mask')
if len(self.keys) == 1: #only one library, therefore no mask
_gene_effect_mask = tf.constant(1, shape=(len(self.all_cells), len(self.all_genes)), dtype=dtype)
mask_count = len(self.all_cells) * len(self.all_genes)
print("built mask with no exclusions")
return _gene_effect_mask, mask_count
else:
print(self.keys)
mask = {}#pd.DataFrame(0, index=self.all_cells, columns=self.all_genes, dtype=np.bool)
for cell in self.all_cells:
libraries = [key for key in self.keys if cell in self.cells[key]]
covered_genes = sorted(set.intersection(*[set(self.genes[key]) for key in libraries]))
mask[cell] = pd.Series(1, index=covered_genes, dtype=self.np_dtype)
mask = pd.DataFrame(mask).T.reindex(index=self.all_cells, columns=self.all_genes).fillna(0)
_gene_effect_mask = tf.constant(mask.values, dtype=dtype)
mask_count = (mask == 1).sum().sum()
print('made gene_effect mask, excluded %i (%1.5f) values' % ((mask == 0).sum().sum(), (mask == 0).mean().mean()))
return _gene_effect_mask, mask_count
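# Sketch of the mask logic above with hypothetical libraries 'libA' and 'libB': a
# line screened in both libraries keeps 1s only for genes present in BOTH guide sets,
# while a line screened in 'libA' alone keeps 1s for every 'libA' gene. Masked (0)
# entries are zeroed out of the residue and excluded from the penalty normalization
# (mask_count counts only unmasked entries).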
def _get_days(self, sequence_map, dtype):
print('\nbuilding doubling vectors')
_days = {key:
tf.constant(Chronos.default_timepoint_scale * val.set_index('sequence_ID').loc[self.index_map[key]].days.astype(self.np_dtype).values,
dtype=dtype, shape=(len(self.index_map[key]), 1), name="days_%s" % key)
for key, val in sequence_map.items()}
for key in self.keys:
print("made days vector of shape %r for %s" %(
_days[key].get_shape().as_list(), key))
return _days
def _get_late_tf_timepoints(self, readcounts, dtype):
print("\nbuilding late observed timepoints")
_rpm = {}
_mask = {}
for key in self.keys:
rpm_np = readcounts[key].loc[self.index_map[key], self.column_map[key]].copy()
rpm_np = 1e6 * (rpm_np.values + 1e-32) / (rpm_np.fillna(0).values + 1e-32).sum(axis=1).reshape((-1, 1))
mask = pd.notnull(rpm_np)
_mask[key] = tf.constant(mask, dtype=tf.bool, name='NaN_mask_%s' % key)
rpm_np[~mask] = 0
_rpm[key] = self.get_persistent_input(dtype, rpm_np, name='rpm_%s' % key)
print("\tbuilt normalized timepoints for %s with shape %r (replicates X guides)" %(
key, rpm_np.shape))
return _rpm, _mask
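# Worked example of the normalization above (assumed numbers): a replicate with raw
# readcounts [100, 300, 600] sums to 1000, so its row becomes
# 1e6 * [100, 300, 600] / 1000 = [1e5, 3e5, 6e5] reads per million; NaN entries are
# recorded in the mask and then set to 0 so they are ignored by the cost.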
def _get_tf_measured_initial(self, readcounts, sequence_map, dtype):
print('\nbuilding initial reads')
_measured_initial = {}
for key in self.keys:
rc = readcounts[key]
sm = sequence_map[key]
sm = sm[sm.cell_line_name == 'pDNA']
batch = rc.loc[sm.sequence_ID]
if batch.empty:
raise ValueError("No sequenced entities are labeled 'pDNA', or there are no readcounts for those that are")
if batch.shape[0] > 1:
batch = batch.groupby(sm.pDNA_batch.values).sum().astype(self.np_dtype)
else:
batch = pd.DataFrame({self.pDNA_unique[key][0]: batch.iloc[0]}).T.astype(self.np_dtype)
batch = batch.loc[self.pDNA_unique[key], self.column_map[key]]
if batch.isnull().sum().sum() != 0:
print(batch)
raise RuntimeError("NaN values encountered in batched pDNA")
initial_normed = batch.divide(batch.sum(axis=1), axis=0).values + 1e-8
_measured_initial[key] = tf.constant(initial_normed, name='measured_initial_%s' % key, dtype=dtype)
return _measured_initial
################## C R E A T E V A R I A B L E S #######################
def _get_initial_tf_variables(self, dtype):
print("\nbuilding initial reads estimate")
v_initial = {}
_initial_core = {}
_initial = {}
_initial_offset = {}
_grouped_initial_offset = {}
for key in self.keys:
initial_normed = self.sess.run(self._measured_initial[key], self.run_dict)
v_initial[key] = tf.Variable(np.zeros((initial_normed.shape[1], 1), dtype=self.np_dtype), dtype=dtype, name='initial_%s' % key)
_initial_offset[key] = tf.exp(v_initial[key] - tf.reduce_mean(input_tensor=v_initial[key]))
_grouped_initial_offset[key] = tf.transpose(a=tf.math.unsorted_segment_mean(
_initial_offset[key],
self.guide_map[key]['gather_ind_inner'],
num_segments=self.ngenes,
name='grouped_diff_%s' % key
))
_initial_core[key] = self._measured_initial[key] *\
tf.exp(tf.transpose(a=_initial_offset[key]) - tf.gather(_grouped_initial_offset[key],
self.guide_map[key]['gather_ind_inner'], axis=1))
_initial[key] = tf.gather(_initial_core[key] / tf.reshape(tf.reduce_sum(input_tensor=_initial_core[key], axis=1), shape=(-1, 1)),
self.batch_map[key]['gather_ind_inner'],
axis=0,
name='initial_read_est_%s' % key
)
print("made initial batch with shape %r for %s" %(
initial_normed.shape, key))
return v_initial, _initial_core, _initial, _initial_offset, _grouped_initial_offset
def _get_tf_guide_efficacy(self, dtype):
print("building guide efficacy")
v_guide_efficacy = tf.Variable(
#last guide is dummy
tf.random.normal(shape=(1, self.nguides+1), stddev=.01, dtype=dtype),
name='guide_efficacy_base', dtype=dtype)
_guide_efficacy = tf.exp(-tf.abs(v_guide_efficacy), name='guide_efficacy')
tf.compat.v1.summary.histogram("guide_efficacy", _guide_efficacy)
print("built guide efficacy: shape %r" %_guide_efficacy.get_shape().as_list())
return v_guide_efficacy, _guide_efficacy
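# Note on the parameterization above (sketch): efficacy = exp(-|v|) maps the
# unconstrained variable v into (0, 1], e.g. v = 0 gives efficacy 1.0, |v| ~ 0.69
# gives ~0.5, and large |v| gives ~0. The extra (nguides+1)-th column is the dummy
# guide consumed by the guide regularization further down.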
def _get_tf_growth_rate(self, dtype):
print("building growth rate")
v_growth_rate = { key: tf.Variable(
tf.random.normal(shape=(self.nlines, 1), stddev=.01, mean=1, dtype=dtype),
name='growth_rate_base_%s' % key, dtype=dtype)
for key in self.keys}
_line_presence_mask = {key: tf.constant( np.array([s in self.cells[key] for s in self.all_cells], dtype=self.np_dtype).reshape((-1, 1)) )
for key in self.keys}
_line_presence_boolean = {key: tf.constant( np.array([s in self.cells[key] for s in self.all_cells], dtype=bool), dtype=tf.bool)
for key in self.keys}
_growth_rate_square = {key: (val * _line_presence_mask[key]) ** 2 for key, val in v_growth_rate.items()}
_growth_rate = {key: tf.divide(val, tf.reduce_mean(input_tensor=tf.boolean_mask(tensor=val, mask=_line_presence_boolean[key])),
name="growth_rate_%s" % key)
for key, val in _growth_rate_square.items()}
print("built growth rate: shape %r" % {key: val.get_shape().as_list()
for key, val in _growth_rate.items()})
return v_growth_rate, _growth_rate, _line_presence_boolean
def _get_tf_cell_efficacy(self, dtype):
print("\nbuilding cell line efficacy")
v_cell_efficacy = { key: tf.Variable(
tf.random.normal(shape=(self.nlines, 1), stddev=.01, mean=0, dtype=dtype),
name='cell_efficacy_base_%s' % key, dtype=dtype)
for key in self.keys}
_cell_efficacy = {key: tf.exp(-tf.abs(v_cell_efficacy[key]),
name='cell_efficacy_%s' % key)
for key in self.keys}
print("built cell line efficacy: shapes %r" % {key: v.get_shape().as_list() for key, v in _cell_efficacy.items()})
return v_cell_efficacy, _cell_efficacy
def _get_tf_screen_delay(self, initial_screen_delay, dtype):
print("building screen delay")
v_screen_delay = tf.Variable(np.sqrt(Chronos.default_timepoint_scale * initial_screen_delay) * np.ones((1, self.ngenes), dtype=self.np_dtype),
dtype=dtype)
_screen_delay = tf.square(v_screen_delay, name="screen_delay")
tf.compat.v1.summary.histogram("screen_delay", _screen_delay)
print("built screen delay")
return v_screen_delay, _screen_delay
def _get_tf_gene_effect(self, dtype):
print("building gene effect")
gene_effect_est = np.random.uniform(-.0001, .00005, size=(self.nlines, self.ngenes)).astype(self.np_dtype)
#self._combined_gene_effect = tf.Variable(gene_effect_est, dtype=dtype, name="Gene_Effect")
v_mean_effect = tf.Variable(np.random.uniform(-.0001, .00005, size=(1, self.ngenes)), name='GE_mean', dtype=dtype)
v_residue = tf.Variable(gene_effect_est, dtype=dtype, name='GE_deviation')
_residue = v_residue * self._gene_effect_mask
_true_residue = (
v_residue - (tf.reduce_sum(input_tensor=v_residue, axis=0)/tf.reduce_sum(input_tensor=self._gene_effect_mask, axis=0) )[tf.newaxis, :]
) * self._gene_effect_mask
_combined_gene_effect = v_mean_effect + _true_residue
tf.compat.v1.summary.histogram("mean_gene_effect", v_mean_effect)
print("built core gene effect: %i cell lines by %i genes" %tuple(_combined_gene_effect.get_shape().as_list()))
return v_mean_effect, v_residue, _residue, _true_residue, _combined_gene_effect
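# Sketch of the decomposition above (assumed toy numbers): for one gene with
# v_mean_effect = -0.5 and masked per-line residues [0.2, -0.2, 0.0] (already mean 0),
# the combined gene effect is [-0.3, -0.7, -0.5]. The hierarchical penalty later
# shrinks _true_residue toward 0, pulling individual lines toward the gene mean.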
############################# C O R E M O D E L ##############################
def _get_effect_days(self, _screen_delay, _days):
print("\nbuilding effective days")
with tf.compat.v1.name_scope("days"):
_effective_days = {key:
tf.clip_by_value(val - _screen_delay, 0, 100)
for key, val in _days.items()}
print("built effective days, shapes %r" % {key: val.get_shape().as_list() for key, val in _effective_days.items()})
return _effective_days
def _get_gene_effect_growth(self, _combined_gene_effect, _growth_rate):
print('\nbuilding gene effect growth graph nodes')
with tf.compat.v1.name_scope('GE_G'):
_gene_effect_growth = {key: _combined_gene_effect * _growth_rate[key]
for key in self.keys}
print("built gene effect growth graph nodes, shapes %r" % {key: val.get_shape().as_list()
for key, val in _gene_effect_growth.items()})
return _gene_effect_growth
def _get_combined_efficacy(self, _cell_efficacy, _guide_efficacy):
print('\nbuilding combined efficacy')
with tf.compat.v1.name_scope('efficacy'):
_efficacy = {key:
tf.matmul(_cell_efficacy[key], tf.gather(_guide_efficacy, self.guide_map[key]['gather_ind_outer'], axis=1, name='guide'),
name="combined")
for key in self.keys} #cell line by all guide matrix
_selected_efficacies = {
key: tf.gather(#expand to replicates in given library
_efficacy[key],
self.replicate_map[key]['gather_ind_inner'],
name="replicate"
)
for key in self.keys
}
print("built combined efficacy, shape %r" % {key: v.get_shape().as_list()for key, v in _efficacy.items()})
print("built expanded combined efficacy, shapes %r" % {key: val.get_shape().as_list() for key, val in _selected_efficacies.items()})
return _efficacy, _selected_efficacies
def _get_growth_and_fold_change(self, _gene_effect_growth, _effective_days, _selected_efficacies):
print("\nbuilding growth estimates of edited cells and overall estimates of fold change in guide abundance")
_change = {}
_growth = {}
with tf.compat.v1.name_scope("FC"):
for key in self.keys:
_growth[key] = tf.gather(
tf.exp(
tf.gather(
_gene_effect_growth[key],
self.replicate_map[key]['gather_ind_inner'],
axis=0
) * _effective_days[key]
)-1,
self.guide_map[key]['gather_ind_inner'],
axis=1,
name="growth_%s" %key
)
_change[key] = tf.add(
np.float64(1.0),
tf.multiply(
_selected_efficacies[key],
_growth[key],
name="eff_mult"
),
name="FC_%s" % key
)
print("built growth and change")
return _growth, _change
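# Worked example of the fold-change model above (assumed numbers): for a guide with
# efficacy 1.0 targeting a gene whose gene_effect * growth_rate is -0.5, in a
# replicate with an effective (already scaled) days value of 0.7, growth is
# exp(-0.5 * 0.7) - 1 ~ -0.30, so the predicted fold change is 1 + 1.0 * (-0.30) ~ 0.70.
# A lower efficacy pulls the change proportionally back toward 1 (no depletion).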
def _get_abundance_estimates(self, _initial, _change):
print("\nbuilding unnormalized estimates of final abundance")
_out = {key: tf.multiply(_initial[key], _change[key], name="out_%s" % key)
for key in self.keys}
print("built unnormalized abundance")
print("\nbuilding normalized estimates of final abundance")
with tf.compat.v1.name_scope('out_norm'):
_output_norm = {key:
1e6 * tf.divide((val + 1e-32), tf.reshape(tf.reduce_sum(input_tensor=val, axis=1), shape=(-1, 1)),
name=key
)
for key, val in _out.items()}
print("built normalized abundance")
return _out, _output_norm
##################################### C O S T #########################################
def _get_guide_regularization(self, _guide_efficacy, dtype):
print('\nassembling guide efficacy regularization')
guide_map_pd = {}
max_guides = {}
_guide_reg_matrix = {}
_guide_reg_cost = {}
#guarantee that the "dummy" guide will never be the second most effective guide for any gene
fixed_adjust_np = np.zeros(self.nguides+1, dtype=self.np_dtype)
fixed_adjust_np[-1] = 1e6
_guide_reg_base = 1.0 / _guide_efficacy[0] + tf.constant(fixed_adjust_np, dtype=dtype)
lists = []
for key in self.keys:
guide_map_pd[key] = pd.Series(self.guide_map[key]['gather_ind_outer'],
index=self.guide_map[key]['gather_ind_inner']
).sort_index()
ngenes = guide_map_pd[key].index.nunique()
value_counts = pd.Series(self.guide_map[key]['gather_ind_inner']).value_counts()
max_guides[key] = value_counts.max()
nextras = sum(max_guides[key] - value_counts)
index = [gene for gene in guide_map_pd[key].index.unique() for i in range(max_guides[key] - value_counts[gene])]
dummies = pd.Series([self.nguides] * nextras, index=index)
guide_map_pd[key] = pd.concat([guide_map_pd[key], dummies]).sort_index()
reg_matrix_ind = guide_map_pd[key].values.reshape((ngenes, max_guides[key])).astype(int)
_guide_reg_matrix[key] = tf.sort(
tf.gather(_guide_reg_base, reg_matrix_ind, axis=0),
direction='ASCENDING',
name='sorted_guide_reg_%s' % key
)
if False:#all([val > 2 for val in self.median_timepoint_counts.values()]):
# only regularize gap between first and second most efficacious guide
_guide_reg_cost[key] = tf.reduce_mean(
input_tensor=_guide_reg_matrix[key][:, 1] - _guide_reg_matrix[key][:, 0],
name="guide_reg_cost_%s" % key
)
else:
#regularize total of first and second most efficacious guide - at least two guides must be near 1
_guide_reg_cost[key] = tf.reduce_mean(
input_tensor=_guide_reg_matrix[key][:, 1] + _guide_reg_matrix[key][:, 0],
name="guide_reg_cost_%s" % key
)
_total_guide_reg_cost = 1.0/len(_guide_reg_cost) * tf.add_n(list(_guide_reg_cost.values()))
return _total_guide_reg_cost
def _get_smoothed_ge_regularization(self, v_mean_effect, _true_residue, kernel_width, dtype):
print("building smoothed regularization")
kernel_size = int(6 * kernel_width)
kernel_size = kernel_size + kernel_size % 2 + 1 #guarantees odd width
kernel = np.exp( -( np.arange(kernel_size, dtype=self.np_dtype) - kernel_size//2 )**2/ (2*kernel_width**2) )
kernel = kernel / kernel.sum()
_kernel = tf.constant(kernel, dtype=dtype, name='kernel')[:, tf.newaxis, tf.newaxis]
_ge_argsort = tf.argsort(v_mean_effect[0])
_residue_sorted = tf.gather(_true_residue, _ge_argsort, axis=1)[:, :, tf.newaxis]
_residue_smoothed = tf.nn.convolution(input=_residue_sorted, filters=_kernel, padding='SAME')
_smoothed_presum = tf.square(_residue_smoothed)
return _smoothed_presum
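# Sketch of the kernel construction above (default kernel_width = 5 assumed):
# kernel_size starts at int(6 * 5) = 30 and becomes 30 + 0 + 1 = 31 taps, a
# normalized Gaussian with SD 5 centered on tap 15. Genes are ordered by their mean
# effect before the 1-D convolution, so the smoothing penalty couples the residues of
# genes with similar mean essentiality.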
def _get_initial_regularization(self, _initial_offset):
print("\nbuilding initial reads regularization/cost")
_initial_cost = {key:
tf.reduce_mean( input_tensor=tf.square(_initial_offset[key]),
name='cost_initial_%s' %key)
for key in self.keys
}
return _initial_cost
def _get_nb2_cost(self, _excess_variance, _output_norm, _rpm, _mask, dtype):
print('\nbuilding NB2 cost')
with tf.compat.v1.name_scope('cost'):
# the NB2 cost: (y_i + 1/alpha) * ln(1 + alpha * mu_i) - y_i * ln(alpha * mu_i)
# modified with constants and a -mu_i term, which makes it reduce to the multinomial cost in the limit alpha -> 0
_cost_presum = {key:
(
((_rpm[key]+1e-6) + 1./_excess_variance[key]) * tf.math.log(
(1 + _excess_variance[key] * (_output_norm[key] + 1e-6)) /
(1 + _excess_variance[key] * (_rpm[key] + 1e-6))
) +
(_rpm[key]+1e-6) * tf.math.log((_rpm[key] + 1e-6) / (_output_norm[key] + 1e-6) )
)
for key in self.keys}
_scale = tf.compat.v1.placeholder(dtype=dtype, shape=(), name='scale')
_cost = _scale/len(self.keys) * tf.add_n([tf.reduce_mean(input_tensor=tf.boolean_mask(tensor=v, mask=_mask[key]))
for key, v in _cost_presum.items()]
)
tf.compat.v1.summary.scalar("unregularized_cost", _cost)
return _cost_presum, _cost, _scale
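# Sketch of the cost behavior (assumed values): with alpha = excess_variance = 0.05
# and observed rpm y equal to the predicted output_norm mu, both log terms above are
# log(1) = 0, so a perfect prediction contributes ~0 cost (up to the 1e-6
# pseudocounts), which is why the unregularized cost is 0 at a perfect fit.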
def _get_full_cost(self, dtype):
print("building other regularizations")
with tf.compat.v1.name_scope('full_cost'):
self._L1_penalty = self.gene_effect_L1 * tf.square(tf.reduce_sum(input_tensor=self._combined_gene_effect)/self.mask_count)
self._L2_penalty = self.gene_effect_L2 * tf.reduce_sum(input_tensor=tf.square(self._combined_gene_effect))/self.mask_count
self._hier_penalty = self.gene_effect_hierarchical * tf.reduce_sum(input_tensor=tf.square(self._true_residue))/self.mask_count
self._growth_reg_cost = -self.growth_rate_reg * 1.0/len(self.keys) * tf.add_n([
tf.reduce_mean( input_tensor=tf.math.log(tf.boolean_mask(tensor=v, mask=self._line_presence_boolean[key])) )
for key, v in self._growth_rate.items()
])
self._guide_efficacy_reg = tf.compat.v1.placeholder(dtype, shape=())
self.run_dict[self._guide_efficacy_reg] = self.guide_efficacy_reg
self._guide_reg_cost = self._guide_efficacy_reg * self._total_guide_reg_cost
self._smoothed_cost = self.gene_effect_smoothing * tf.reduce_mean(input_tensor=self._smoothed_presum)
self._offset_reg = tf.compat.v1.placeholder(dtype, shape=())
self.run_dict[self._offset_reg] = self.offset_reg
self._initial_cost_sum = self._offset_reg * 1.0/len(self.keys) * tf.add_n(list(self._initial_cost.values()))
_full_cost = self._cost + \
self._L1_penalty + self._L2_penalty + self._guide_reg_cost + self._hier_penalty + \
self._growth_reg_cost + self._initial_cost_sum + \
self._smoothed_cost
tf.compat.v1.summary.scalar("L1_penalty", self._L1_penalty)
tf.compat.v1.summary.scalar("L2_penalty", self._L2_penalty)
tf.compat.v1.summary.scalar("hierarchical_penalty", self._hier_penalty)
return _full_cost
######################### F I N A L I Z I N G ###################################
def cell_efficacy_estimate(self, fold_change, sequence_map, last_reps, cell_efficacy_guide_quantile=.01):
'''
Estimate the maximum depletion possible in each cell line as 1 minus the `cell_efficacy_guide_quantile` quantile of guide fold change in
the last timepoint measured. Multiple replicates for a cell line at the same last timepoint are median-collapsed
before the quantile is taken.
'''
fc = fold_change.loc[last_reps].groupby(sequence_map.set_index('sequence_ID').cell_line_name).median()
cell_efficacy = 1 - fc.quantile(cell_efficacy_guide_quantile, axis=1)
if (cell_efficacy <=0 ).any() or (cell_efficacy > 1).any() or cell_efficacy.isnull().any():
raise RuntimeError("estimated efficacy outside bounds. \n%r\n%r" % (cell_efficacy.sort_values(), fc))
return cell_efficacy
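# Illustrative sketch (assumed numbers): if the 0.01 quantile of guide fold change in
# a line's median-collapsed last timepoint is 0.05, the estimated cell efficacy is
# 1 - 0.05 = 0.95; a quantile fold change at or above 1 would give a non-positive
# efficacy and trigger the RuntimeError above.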
def smart_initialize(self, readcounts, sequence_map, cell_efficacy_guide_quantile):
cell_eff_est = {}
for key in self.keys:
print('\t', key)
sm = sequence_map[key]
last_reps = extract_last_reps(sm)
fc = calculate_fold_change(readcounts[key], sm)
cell_eff_est[key] = self.cell_efficacy_estimate(fc, sm, last_reps, cell_efficacy_guide_quantile)
self.cell_efficacy = cell_eff_est
def nan_check(self):
#labeled data
print('verifying user inputs')
for key in self.keys:
if pd.isnull(self.sess.run(self._days[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._days[%s]" %key
if pd.isnull(self.sess.run(self._initial[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._initial[%s]" %key
if pd.isnull(self.sess.run(self._rpm[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._rpm[%s]" %key
if (self.sess.run(self._excess_variance[key], self.run_dict) < 0).sum().sum() > 0:
assert False, "negative values found in self._excess_variance[%s]" %key
#variables
print('verifying variables')
if pd.isnull(self.sess.run(self._combined_gene_effect, self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._combined_gene_effect"
if pd.isnull(self.sess.run(self.v_guide_efficacy, self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self.v_guide_efficacy"
if pd.isnull(self.sess.run(self._guide_efficacy, self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._guide_efficacy"
#calculated terms
print('verifying calculated terms')
for key in self.keys:
if pd.isnull(self.sess.run(self.v_cell_efficacy[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self.v_cell_efficacy[%r]" % key
if pd.isnull(self.sess.run(self._efficacy[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._efficacy[%r]" % key
print('\t' + key + ' _gene_effect')
if pd.isnull(self.sess.run(self._gene_effect_growth[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._gene_effect_growth[%s]" %key
print('\t' + key + ' _selected_efficacies')
if pd.isnull(self.sess.run(self._selected_efficacies[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._selected_efficacies[%s]" %key
print('\t' + key + ' _out')
if pd.isnull(self.sess.run(self._out[key], self.run_dict)).sum().sum() > 0:
assert False, "nulls found in self._out[%s]" %key
if (self.sess.run(self._out[key], self.run_dict) < 0).sum().sum() > 0:
assert False, "negatives found in self._out[%s]" %key
print('\t' + key + ' _output_norm')
df = self.sess.run(self._output_norm[key], self.run_dict)
if np.sum(pd.isnull(df).sum()) > 0:
assert False, "%i out of %i possible nulls found in self._output_norm[%s]" % (
np.sum(pd.isnull(df).sum()), np.prod(df.shape), key
)
if np.sum((df < 0).sum()) > 0:
assert False, "negative values found in output_norm[%s]" % key
print('\t' + key + ' _rpm')
if np.sum(pd.isnull(self.sess.run(self._rpm[key], self.run_dict)).sum()) > 0:
assert False, "nulls found in self._rpm[%s]" %key
min_rpm = self.sess.run(self._rpm[key], self.run_dict).min().min()
if min_rpm < 0:
raise ValueError("Negative Reads Per Million (RPM) found (%f)" % min_rpm)
min_output_norm = self.sess.run(self._output_norm[key], self.run_dict).min().min()
if min_output_norm < 0:
raise ValueError("Negative predicted normalized reads (output_norm) found (%f)" % min_output_norm)
print('\t' + key + ' _cost_presum')
df = self.cost_presum[key]
print("sess run")
if np.sum(pd.isnull(df).sum()) > 0:
print(df)
print()
print(self.sess.run(
tf.math.log(1 + self._excess_variance_expanded[key] * 1e6 * self._output_norm[key]), self.run_dict)
)
print()
print(self.sess.run(
(self._rpm[key]+1e-6) * tf.math.log(self._excess_variance_expanded[key] * (self._rpm[key] + 1e-6) )
, self.run_dict)
)
raise ValueError("%i nulls found in self._cost_presum[%s]" % (pd.isnull(df).sum().sum(), key))
print('\t' + key + ' _cost')
if pd.isnull(self.sess.run(self._cost, self.run_dict)):
assert False, "Cost is null"
print('\t' + key + ' _full_costs')
if pd.isnull(self.sess.run(self._full_cost, self.run_dict)):
assert False, "Full cost is null"
################################################################################################
#################### T R A I N I N G M E T H O D S ###############################
################################################################################################
def step(self, ge_only=False):
if ge_only:
self.sess.run(self._ge_only_step, self.run_dict)
else:
self.sess.run(self._step, self.run_dict)
self.epoch += 1
def train(self, nepochs=800, starting_learn_rate=1e-4, burn_in_period=50, ge_only=100, report_freq=50,
essential_genes=None, nonessential_genes=None, additional_metrics={}):
rates = np.exp(np.linspace(np.log(starting_learn_rate), np.log(self.max_learning_rate), burn_in_period))
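# Sketch of the burn-in schedule above (default arguments assumed): with
# starting_learn_rate = 1e-4, max_learning_rate = 0.02 and burn_in_period = 50,
# `rates` holds 50 geometrically spaced values from 1e-4 up to 0.02; once the epoch
# index runs past the array, the IndexError branch below pins the rate at
# max_learning_rate.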
start_time = time()
start_epoch = self.epoch
for i in range(start_epoch, start_epoch + nepochs):
try:
self.learning_rate = rates[self.epoch]
except IndexError:
self.learning_rate = self.max_learning_rate
self.step(ge_only=self.epoch < ge_only)
if not i%report_freq:
delta = time() - start_time
completed = i+1 - start_epoch
to_go = nepochs - completed
projected = delta * to_go/completed
if completed > 1:
print('%i epochs trained, time taken %s, projected remaining %s' %
(i+1, timedelta(seconds=round(delta)), timedelta(seconds=round(projected)))
)
print('cost', self.cost)
#print('pDNA MSE', {key: self.sess.run(self._initial_cost[key], self.run_dict) for key in self.keys})
print('relative_growth_rate')
for key, val in self.growth_rate.items():
print('\t%s max %1.3f, min %1.5f, mean %1.3f' % (
key, val[val!=0].max(), val[val!=0].min(), val[val!=0].mean()))
print('mean guide efficacy', self.guide_efficacy.mean())
print('initial_offset SD: %r' % [(key, self.initial_offset[key].std()) for key in self.keys])
print()
ge = self.gene_effect
print('gene mean', ge.mean().mean())
print('SD of gene means', ge.mean().std())
print("Mean of gene SDs", ge.std().mean())
for key, val in additional_metrics.items():
print(key, val(ge))
if essential_genes is not None:
print("Fraction Ess gene scores in bottom 15%:", (ge.rank(axis=1, pct=True)[essential_genes] < .15).mean().mean()
)
print("Fraction Ess gene medians in bottom 15%:", (ge.median().rank(pct=True)[essential_genes] < .15).mean()
)
if nonessential_genes is not None:
print("Fraction Ness gene scores in top 85%:", (ge.rank(axis=1, pct=True)[nonessential_genes] > .15).mean().mean()
)
print("Fraction Ness gene medians in top 85%:", (ge.median().rank(pct=True)[nonessential_genes] > .15).mean()
)
print('\n\n')
def save(self, directory, overwrite=False):
if os.path.isdir(directory) and not overwrite:
raise ValueError("Directory %r exists. To overwrite contents, use `overwrite=True`" % directory)
elif not os.path.isdir(directory):
os.mkdir(directory)
write_hdf5(self.gene_effect, os.path.join(directory, "chronos_ge_unscaled.hdf5"))
pd.DataFrame({"efficacy": self.guide_efficacy}).to_csv(os.path.join(directory, "guide_efficacy.csv"))
pd.DataFrame(self.cell_efficacy)
import copy
import logging
from os import posix_fallocate
from typing import Tuple
from d3m.metadata.hyperparams import List
import numpy as np
import pandas as pd
from d3m.container import dataset
from processing import pipeline
from sklearn import metrics
from processing import metrics as processing_metrics
from sklearn.preprocessing import label_binarize
logger = logging.getLogger(__name__)
class Scorer:
D3M_INDEX_IDX = 0
PREDICTION_IDX = 1
CONFIDENCE_IDX = 2
def __init__(self, logger, task, score_config, fitted_pipeline, target_idx):
self.logger = logger
self.solution_id = task.solution_id
# Assign configurations
self.dataset_uri = task.dataset_uri
self.method = score_config.method
self.metric = score_config.metric
self.shuffle = score_config.shuffle
self.random_seed = score_config.random_seed
self.stratified = score_config.stratified
self.num_folds = score_config.num_folds
self.train_size = score_config.train_size
self.fitted_pipeline = fitted_pipeline
self.target_idx = target_idx
self.pos_label = score_config.pos_label
def run(self):
# Attempt to load extant 'fit' solution
# fit_fn = utils.make_job_fn(self.solution_id)
# with open(fit_fn, 'rb') as f:
# unpacked = dill.load(f)
# runtime = unpacked['runtime']
# fitted_pipeline = unpacked['pipeline']
# Load the data to test
self.inputs = dataset.Dataset.load(self.dataset_uri)
# TODO: actually accept new data
if self.method == "holdout":
return self.hold_out_score()
elif self.method == "ranking":
return self.ranking()
# elif self.method == 'k_fold':
# #return self.k_fold_score()
else:
raise ValueError("Cannot score {} type".format(self.method))
def _get_pos_label(self, labels_series):
"""Return pos_label if needed, False if not needed.
sklearn binary scoring funcs run on indicator types just fine
but will break on categorical w/o setting pos_label kwarg
"""
# use any explicitly set positive label
if self.pos_label:
return self.pos_label
# can safely assume there is only one target for now, will have to change in the future
labels_dtype = labels_series.dtype.name
# not ideal to compare by string name, but direct comparison of the dtype will throw errors
# for categorical for older versions of pandas
if labels_dtype == "object" or labels_dtype == "category":
# (not in problem schema or data schema)
labels_list = labels_series.unique().tolist()
# since problem / data schema don't list positive label, we'll do a quick heuristic
if set(labels_list) == set(["0", "1"]):
return "1"
else:
# grab first label arbitrarily bc as of now, no good way to determine what is positive label
return labels_list[0]
return False
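# Illustrative sketch (assumed labels): for a categorical target with labels
# {'0', '1'} this returns '1'; for something like {'healthy', 'sick'} it falls back
# to the first label in order of appearance; for numeric indicator targets it returns
# False so sklearn's default positive-label handling is used.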
def _f1(self, true, preds):
pos_label = self._get_pos_label(true)
if pos_label:
return metrics.f1_score(true, preds, pos_label=pos_label)
return metrics.f1_score(true, preds)
def _precision(self, true, preds):
pos_label = self._get_pos_label()
if pos_label:
return metrics.precision_score(true, preds, pos_label=pos_label)
return metrics.precision_score(true, preds)
def _recall(self, true, preds):
pos_label = self._get_pos_label()
if pos_label:
return metrics.recall_score(true, preds, pos_label=pos_label)
return metrics.recall_score(true, preds)
def _roc_score(self, true, confidences, pos_label=None, average=None):
# roc_auc_score assumes that the confidence scores "must be the scores of
# the class with the greater label". In our case, they are (and should be) the confidence
# scores of the label the user identified as positive, which may not line up with the
# expectation of the roc_auc_score calculation. What the "greater label" actually is isn't
# ever clearly defined in the docs, but looking at the implementation it is the last label in
# in the list when unique(labels) is run. We mimic that logic here, and re-map the labels
# so that the positive label will be identified as the greater label within the roc_auc_score
# function.
if pos_label is not None:
labels = np.unique(true)
if len(labels) == 1 and pos_label != labels[0]:
labels[0] = pos_label
elif len(labels) > 1 and pos_label != labels[1]:
temp = labels[1]
labels[1] = pos_label
labels[0] = temp
true = label_binarize(true, labels)[:, 0]
if average is not None:
return metrics.roc_auc_score(
true, confidences, average=average, multi_class="ovr"
)
return metrics.roc_auc_score(true, confidences)
def _rmse_avg(self, true, preds):
return np.average(
metrics.mean_squared_error(true, preds, multioutput="raw_values") ** 0.5
)
def _score(self, metric, true, preds, confidences=None, pos_label=None):
if metric == "f1_micro":
score = metrics.f1_score(true, preds, average="micro")
elif metric == "f1_macro":
score = metrics.f1_score(true, preds, average="macro")
elif metric == "f1":
score = self._f1(true, preds)
elif metric == "roc_auc":
score = self._roc_score(true, confidences, pos_label=pos_label)
elif metric == "roc_auc_micro":
score = self._roc_score(true, confidences, average="micro")
elif metric == "roc_auc_macro":
score = self._roc_score(true, confidences, average="macro")
elif metric == "accuracy":
score = metrics.accuracy_score(true, preds)
elif metric == "precision":
score = self._precision(true, preds)
elif metric == "recall":
score = self._recall(true, preds)
elif metric == "mean_squared_error":
score = metrics.mean_squared_error(true, preds)
elif metric == "root_mean_squared_error":
score = metrics.mean_squared_error(true, preds) ** 0.5
elif metric == "root_mean_squared_error_avg":
score = self._rmse_avg(true, preds)
elif metric == "mean_absolute_error":
score = metrics.mean_absolute_error(true, preds)
elif metric == "r_squared":
score = metrics.r2_score(true, preds)
elif metric == "jaccard_similarity_score":
score = metrics.jaccard_similarity_score(true, preds)
elif metric == "normalized_mutual_information":
score = metrics.normalized_mutual_info_score(true, preds)
elif metric == "object_detection_average_precision":
self.logger.warning(f"{metric} metric unsuppported - returning 0")
score = 0.0
else:
raise ValueError("Cannot score metric {}".format(metric))
return score
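# Illustrative usage sketch (hypothetical values, not from the original code):
# >>> scorer._score("f1_macro", pd.Series(["a", "b", "a"]), pd.Series(["a", "a", "a"]))
# returns sklearn's macro-averaged F1 (0.4 here); confidence-based metrics such as
# "roc_auc" additionally need the `confidences` array extracted in hold_out_score.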
def hold_out_score(self) -> Tuple[List[str], str]:
# produce predictions from the fitted model and extract to single col dataframe
# with the d3mIndex as the index
_in = copy.deepcopy(self.inputs)
results = pipeline.produce(self.fitted_pipeline, (_in,))
# Not sure how to do this properly - we assume that we will use `outputs.0` for scoring, but it is
# possible that, in a non-standard pipeline, `outputs.0` could be the output from another step,
# and `outputs.1` contains the predictions.
if len(results.values) > 1:
self.logger.warning(
"Pipeline produced > 1 outputs. Scoring first output only."
)
result_df = results.values["outputs.0"]
# get column names for convenience
d3m_index_col = result_df.columns[Scorer.D3M_INDEX_IDX]
confidence_col = (
result_df.columns[Scorer.CONFIDENCE_IDX]
if len(result_df.columns) > 2
else None
)
prediction_col = result_df.columns[Scorer.PREDICTION_IDX]
# check to see whether multiclass confidence results were returned (multiple rows per d3mIndex)
multiclass_probabilities = not result_df[d3m_index_col].is_unique
# ensure results are sorted by index
if confidence_col:
# Get the predictions into sorted order by d3mIndex, confidence.
result_df[d3m_index_col] = pd.to_numeric(result_df[d3m_index_col])
result_df[confidence_col] = pd.to_numeric(result_df[confidence_col])
result_df.sort_values(
by=[d3m_index_col, confidence_col],
ascending=[True, False],
inplace=True,
)
else:
# no confidences, just ensure that the result is sorted by D3M index to ensure consistency
# with the ground truth.
result_df.sort_values(by=[d3m_index_col], inplace=True)
# extract the ground truth from the inputs
true_df = self.inputs["learningData"]
# check if our labels are binary, since this is what some of the sklearn primitives use as the
# binary/multiclass criterion
binary_labels = len(true_df[prediction_col].unique()) <= 2
# extract confidence information into the format that the scoring
# functions require (1- or 2-dimensional ndarray)
confidences = self.extract_confidences(
result_df,
multiclass_probabilities,
binary_labels,
d3m_index_col,
confidence_col,
)
# take one label in case this is a multi index - previous sort should guarantee
# the top label is taken if confidences were assigned. This is the required format
# for metrics that just score on the label.
result_df.drop_duplicates(inplace=True, subset=d3m_index_col)
# take one label in the case this is a multi index
# put the ground truth into a single col dataframe
true_df[d3m_index_col] = pd.to_numeric(true_df[d3m_index_col])
true_df.drop_duplicates(inplace=True, subset=d3m_index_col)
true_df.sort_values(by=[d3m_index_col], inplace=True)
# only take the d3m indices that exist for results
true_df = true_df.set_index(true_df[d3m_index_col])
result_df = result_df.set_index(result_df[d3m_index_col])
result_df.index.rename("index", inplace=True)
true_df = true_df.loc[result_df.index]
result_series = result_df[prediction_col]
result_series.fillna(value=0, inplace=True)
true_series = true_df.iloc[:, self.target_idx]
# force the truth value to the same type as the predicted value
true_series = true_series.astype(result_series.dtype)
# validate metric against labels and probabilities that were returned
metric = processing_metrics.translate_metric(self.metric)
if metric in processing_metrics.confidence_metrics:
if confidences is None:
# requested a metric that requires confidences but none available
logger.warning(
f"cannot generate {metric} score - no confidence information available"
)
metric = "f1Macro"
elif (
multiclass_probabilities
and metric not in processing_metrics.multiclass_classification_metrics
):
# log a warning that we can't process the requested metric and assign a default
logger.warning(
f"cannot generate {metric} score - can't apply metric to multiclass targets"
)
if metric in processing_metrics.confidence_metrics:
metric = "rocAucMacro"
else:
metric = "f1Macro"
metric = processing_metrics.translate_d3m_metric(metric)
# return the score and the metric that was actually applied
return ([self._score(metric, true_series, result_series, confidences, self.pos_label)], metric)
def ranking(self):
# rank is always 1 when requested since the system only generates a single solution
if self.metric == "rank":
score = [1]
else:
raise ValueError(f"Cannot rank metric {self.metric}")
return (score, self.metric)
"""
def k_fold_score(self,):
fold_scores = []
kf = StratifiedKFold if self.stratified else KFold
kf = kf(n_splits=self.num_folds, shuffle=self.shuffle, random_state=self.random_seed)
features, targets = self._get_features_and_targets()
for train_index, test_index in kf.split(features, targets):
X_train, X_test = features.iloc[train_index, :], features.iloc[test_index, :]
y_train, y_test = targets.iloc[train_index, :], targets.iloc[test_index, :]
self.engine.variables['features'] = X_train
self.engine.variables['targets'] = y_train
self.engine.refit()
result = self.engine.model_produce(X_test)
score = self._score(self.metric, y_test, result)
fold_scores.append(score)
return fold_scores
"""
def extract_confidences(
self,
result_df: pd.DataFrame,
multiclass_probabilities: bool,
binary_labels: bool,
d3m_index_col: int,
confidence_col: int,
) -> np.ndarray:
confidence = None
if confidence_col is not None:
if multiclass_probabilities:
# if there are multiple probabilities extract them into a matrix
confidence = np.stack(
result_df.groupby(d3m_index_col)[confidence_col]
.apply(np.array)
.values
)
elif binary_labels:
# single probabilities - this is only allowed with binary labels
confidence = pd.to_numeric(result_df[confidence_col])
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses `Series` class, that is distributed version of `pandas.Series`."""
import numpy as np
import pandas
from pandas.core.common import apply_if_callable, is_bool_indexer
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
is_dict_like,
is_list_like,
)
from pandas._libs.lib import no_default
from pandas._typing import IndexKeyFunc
from pandas.util._decorators import doc
import sys
from typing import Union, Optional
import warnings
from modin.utils import _inherit_docstrings, to_pandas, Engine
from modin.config import IsExperimental, PersistentPickle
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .iterator import PartitionIterator
from .utils import from_pandas, is_scalar
from .accessor import CachedAccessor, SparseAccessor
from . import _update_engine
_doc_binary_operation = """
Return {operation} of Series and `{other}` (binary operator `{bin_op}`).
Parameters
----------
{other} : Series or scalar value
The second operand to perform computation.
Returns
-------
{returns}
"""
def _doc_binary_op(operation, bin_op, other="right", returns="Series"):
"""
Return callable documenting `Series` binary operator.
Parameters
----------
operation : str
Operation name.
bin_op : str
Binary operation name.
other : str, default: 'right'
The second operand name.
returns : str, default: 'Series'
Type of returns.
Returns
-------
callable
"""
doc_op = doc(
_doc_binary_operation,
operation=operation,
other=other,
bin_op=bin_op,
returns=returns,
)
return doc_op
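# Illustrative sketch (assumed): _doc_binary_op("addition", "add") returns a `doc`
# decorator that fills the _doc_binary_operation template, so the decorated __add__
# below is documented as "Return addition of Series and `right` (binary operator `add`)."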
@_inherit_docstrings(
pandas.Series, excluded=[pandas.Series.__init__], apilink="pandas.Series"
)
class Series(BasePandasDataset):
"""
Modin distributed representation of `pandas.Series`.
Internally, the data can be divided into partitions in order to parallelize
computations and utilize the user's hardware as much as possible.
Inherit common for DataFrames and Series functionality from the
`BasePandasDataset` class.
Parameters
----------
data : modin.pandas.Series, array-like, Iterable, dict, or scalar value, optional
Contains data stored in Series. If data is a dict, argument order is
maintained.
index : array-like or Index (1d), optional
Values must be hashable and have the same length as `data`.
dtype : str, np.dtype, or pandas.ExtensionDtype, optional
Data type for the output Series. If not specified, this will be
inferred from `data`.
name : str, optional
The name to give to the Series.
copy : bool, default: False
Copy input data.
fastpath : bool, default: False
`pandas` internal parameter.
query_compiler : BaseQueryCompiler, optional
A query compiler object to create the Series from.
"""
_pandas_class = pandas.Series
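# Illustrative usage sketch (added; not from the original source):
# >>> import modin.pandas as pd
# >>> s = pd.Series([1, 2, 4], name="counts")
# >>> s.sum()
# 7
# Construction mirrors pandas; when no query_compiler is given, the data is first
# wrapped in a pandas.Series and then distributed (see __init__ below).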
def __init__(
self,
data=None,
index=None,
dtype=None,
name=None,
copy=False,
fastpath=False,
query_compiler=None,
):
Engine.subscribe(_update_engine)
if isinstance(data, type(self)):
query_compiler = data._query_compiler.copy()
if index is not None:
if any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existent columns or index values to constructor "
"not yet implemented."
)
query_compiler = data.loc[index]._query_compiler
if query_compiler is None:
# Defaulting to pandas
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if name is None:
name = "__reduced__"
if isinstance(data, pandas.Series) and data.name is not None:
name = data.name
query_compiler = from_pandas(
pandas.DataFrame(
pandas.Series(
data=data,
index=index,
dtype=dtype,
name=name,
copy=copy,
fastpath=fastpath,
)
)
)._query_compiler
self._query_compiler = query_compiler.columnarize()
if name is not None:
self._query_compiler = self._query_compiler
self.name = name
def _get_name(self):
"""
Get the value of the `name` property.
Returns
-------
hashable
"""
name = self._query_compiler.columns[0]
if name == "__reduced__":
return None
return name
def _set_name(self, name):
"""
Set the value of the `name` property.
Parameters
----------
name : hashable
Name value to set.
"""
if name is None:
name = "__reduced__"
self._query_compiler.columns = [name]
name = property(_get_name, _set_name)
_parent = None
    # `_parent_axis` denotes the axis that was used to select this Series from a
    # parent DataFrame.
    # If `_parent_axis == 0`, the index axis was used (e.g. via `df.loc[row]`), so
    # indexing operations and assignments should be applied to rows of the parent.
    # If `_parent_axis == 1`, the column axis was used (e.g. via `df[column]`), so
    # assignments should be applied to columns of the parent.
_parent_axis = 0
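    # Illustrative sketch of the two cases described above (assumed behavior):
    #
    #     df = pd.DataFrame({"a": [1, 2]})
    #     row = df.loc[0]   # selected along the index axis  -> _parent_axis == 0
    #     col = df["a"]     # selected along the column axis -> _parent_axis == 1
    #
    # so assignments through `col` propagate back into column "a" of the parent.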
@_doc_binary_op(operation="addition", bin_op="add")
def __add__(self, right):
return self.add(right)
@_doc_binary_op(operation="addition", bin_op="add", other="left")
def __radd__(self, left):
return self.add(left)
@_doc_binary_op(operation="union", bin_op="and", other="other")
def __and__(self, other):
if isinstance(other, (list, np.ndarray, pandas.Series)):
return self._default_to_pandas(pandas.Series.__and__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__and__(new_other)
@_doc_binary_op(operation="union", bin_op="and", other="other")
def __rand__(self, other):
if isinstance(other, (list, np.ndarray, pandas.Series)):
return self._default_to_pandas(pandas.Series.__rand__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__rand__(new_other)
# add `_inherit_docstrings` decorator to force method link addition.
@_inherit_docstrings(pandas.Series.__array__, apilink="pandas.Series.__array__")
def __array__(self, dtype=None): # noqa: PR01, RT01, D200
"""
Return the values as a NumPy array.
"""
return super(Series, self).__array__(dtype).flatten()
@property
def __array_priority__(self): # pragma: no cover
"""
Return pandas `__array_priority__` Series internal parameter.
Returns
-------
int
Internal pandas parameter ``__array_priority__`` used during interaction with NumPy.
"""
return self._to_pandas().__array_priority__
# FIXME: __bytes__ was removed in newer pandas versions, so Modin
# can remove it too.
def __bytes__(self):
"""
Return bytes representation of the Series.
Returns
-------
bytes
Notes
-----
Method is deprecated.
"""
return self._default_to_pandas(pandas.Series.__bytes__)
def __contains__(self, key):
"""
Check if `key` in the `Series.index`.
Parameters
----------
key : hashable
Key to check the presence in the index.
Returns
-------
bool
"""
return key in self.index
def __copy__(self, deep=True):
"""
Return the copy of the Series.
Parameters
----------
deep : bool, default: True
Whether the copy should be deep or not.
Returns
-------
Series
"""
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""
Return the deep copy of the Series.
Parameters
----------
memo : Any, optional
Deprecated parameter.
Returns
-------
Series
"""
return self.copy(deep=True)
def __delitem__(self, key):
"""
Delete item identified by `key` label.
Parameters
----------
key : hashable
Key to delete.
"""
if key not in self.keys():
raise KeyError(key)
self.drop(labels=key, inplace=True)
@_doc_binary_op(
operation="integer division and modulo",
bin_op="divmod",
returns="tuple of two Series",
)
def __divmod__(self, right):
return self.divmod(right)
@_doc_binary_op(
operation="integer division and modulo",
bin_op="divmod",
other="left",
returns="tuple of two Series",
)
def __rdivmod__(self, left):
return self.rdivmod(left)
def __float__(self):
"""
Return float representation of Series.
Returns
-------
float
"""
return float(self.squeeze())
@_doc_binary_op(operation="integer division", bin_op="floordiv")
def __floordiv__(self, right):
return self.floordiv(right)
@_doc_binary_op(operation="integer division", bin_op="floordiv")
def __rfloordiv__(self, right):
return self.rfloordiv(right)
def __getattr__(self, key):
"""
Return item identified by `key`.
Parameters
----------
key : hashable
Key to get.
Returns
-------
Any
Notes
-----
First try to use `__getattribute__` method. If it fails
try to get `key` from `Series` fields.
"""
try:
return object.__getattribute__(self, key)
except AttributeError as e:
if key not in _ATTRS_NO_LOOKUP and key in self.index:
return self[key]
raise e
def __int__(self):
"""
Return integer representation of Series.
Returns
-------
int
"""
return int(self.squeeze())
def __iter__(self):
"""
Return an iterator of the values.
Returns
-------
iterable
"""
return self._to_pandas().__iter__()
@_doc_binary_op(operation="modulo", bin_op="mod")
def __mod__(self, right):
return self.mod(right)
@_doc_binary_op(operation="modulo", bin_op="mod", other="left")
def __rmod__(self, left):
return self.rmod(left)
@_doc_binary_op(operation="multiplication", bin_op="mul")
def __mul__(self, right):
return self.mul(right)
@_doc_binary_op(operation="multiplication", bin_op="mul", other="left")
def __rmul__(self, left):
return self.rmul(left)
@_doc_binary_op(operation="disjunction", bin_op="or", other="other")
def __or__(self, other):
if isinstance(other, (list, np.ndarray, pandas.Series)):
return self._default_to_pandas(pandas.Series.__or__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__or__(new_other)
@_doc_binary_op(operation="disjunction", bin_op="or", other="other")
def __ror__(self, other):
if isinstance(other, (list, np.ndarray, pandas.Series)):
return self._default_to_pandas(pandas.Series.__ror__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__ror__(new_other)
@_doc_binary_op(operation="exclusive or", bin_op="xor", other="other")
def __xor__(self, other):
if isinstance(other, (list, np.ndarray, pandas.Series)):
return self._default_to_pandas(pandas.Series.__xor__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__xor__(new_other)
@_doc_binary_op(operation="exclusive or", bin_op="xor", other="other")
def __rxor__(self, other):
if isinstance(other, (list, np.ndarray, pandas.Series)):
return self._default_to_pandas(pandas.Series.__rxor__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__rxor__(new_other)
@_doc_binary_op(operation="exponential power", bin_op="pow")
def __pow__(self, right):
return self.pow(right)
@_doc_binary_op(operation="exponential power", bin_op="pow", other="left")
def __rpow__(self, left):
return self.rpow(left)
def __repr__(self):
"""
Return a string representation for a particular Series.
Returns
-------
str
"""
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
temp_df = self._build_repr_df(num_rows, num_cols)
if isinstance(temp_df, pandas.DataFrame) and not temp_df.empty:
temp_df = temp_df.iloc[:, 0]
temp_str = repr(temp_df)
freq_str = (
"Freq: {}, ".format(self.index.freqstr)
if isinstance(self.index, pandas.DatetimeIndex)
else ""
)
if self.name is not None:
name_str = "Name: {}, ".format(str(self.name))
else:
name_str = ""
if len(self.index) > num_rows:
len_str = "Length: {}, ".format(len(self.index))
else:
len_str = ""
dtype_str = "dtype: {}".format(
str(self.dtype) + ")"
if temp_df.empty
else temp_str.rsplit("dtype: ", 1)[-1]
)
if len(self) == 0:
return "Series([], {}{}{}".format(freq_str, name_str, dtype_str)
return temp_str.rsplit("\n", 1)[0] + "\n{}{}{}{}".format(
freq_str, name_str, len_str, dtype_str
)
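    # Example of the string assembled above (hedged; exact spacing follows pandas):
    #
    #     >>> repr(pd.Series([1, 2], name="x"))
    #     '0    1\n1    2\nName: x, dtype: int64'
    #
    # For an empty Series the early-return branch yields "Series([], ...dtype: ...)".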
def __round__(self, decimals=0):
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int, default: 0
Number of decimal places to round to.
Returns
-------
Series
"""
return self._create_or_update_from_compiler(
self._query_compiler.round(decimals=decimals)
)
def __setitem__(self, key, value):
"""
Set `value` identified by `key` in the Series.
Parameters
----------
key : hashable
Key to set.
value : Any
Value to set.
"""
if isinstance(key, slice):
self._setitem_slice(key, value)
else:
self.loc[key] = value
@_doc_binary_op(operation="subtraction", bin_op="sub")
def __sub__(self, right):
return self.sub(right)
@_doc_binary_op(operation="subtraction", bin_op="sub", other="left")
def __rsub__(self, left):
return self.rsub(left)
@_doc_binary_op(operation="floating division", bin_op="truediv")
def __truediv__(self, right):
return self.truediv(right)
@_doc_binary_op(operation="floating division", bin_op="truediv", other="left")
def __rtruediv__(self, left):
return self.rtruediv(left)
__iadd__ = __add__
__imul__ = __add__
__ipow__ = __pow__
__isub__ = __sub__
__itruediv__ = __truediv__
@property
def values(self): # noqa: RT01, D200
"""
Return Series as ndarray or ndarray-like depending on the dtype.
"""
return super(Series, self).to_numpy().flatten()
def add(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return Addition of series and other, element-wise (binary operator add).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).add(
new_other, level=level, fill_value=fill_value, axis=axis
)
def add_prefix(self, prefix): # noqa: PR01, RT01, D200
"""
Prefix labels with string `prefix`.
"""
return Series(query_compiler=self._query_compiler.add_prefix(prefix, axis=0))
def add_suffix(self, suffix): # noqa: PR01, RT01, D200
"""
Suffix labels with string `suffix`.
"""
return Series(query_compiler=self._query_compiler.add_suffix(suffix, axis=0))
def append(
self, to_append, ignore_index=False, verify_integrity=False
): # noqa: PR01, RT01, D200
"""
Concatenate two or more Series.
"""
from .dataframe import DataFrame
bad_type_msg = (
'cannot concatenate object of type "{}"; only pd.Series, '
"pd.DataFrame, and pd.Panel (deprecated) objs are valid"
)
if isinstance(to_append, list):
if not all(isinstance(o, BasePandasDataset) for o in to_append):
raise TypeError(
bad_type_msg.format(
type(
next(
o
for o in to_append
if not isinstance(o, BasePandasDataset)
)
)
)
)
elif all(isinstance(o, Series) for o in to_append):
self.name = None
for i in range(len(to_append)):
to_append[i].name = None
to_append[i] = to_append[i]._query_compiler
else:
# Matching pandas behavior of naming the Series columns 0
self.name = 0
for i in range(len(to_append)):
if isinstance(to_append[i], Series):
to_append[i].name = 0
to_append[i] = DataFrame(to_append[i])
return DataFrame(self.copy()).append(
to_append,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
)
elif isinstance(to_append, Series):
self.name = None
to_append.name = None
to_append = [to_append._query_compiler]
elif isinstance(to_append, DataFrame):
self.name = 0
return DataFrame(self.copy()).append(
to_append, ignore_index=ignore_index, verify_integrity=verify_integrity
)
else:
raise TypeError(bad_type_msg.format(type(to_append)))
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = (
self.index.append(to_append.index)
if not isinstance(to_append, list)
else self.index.append([o.index for o in to_append])
)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, to_append, ignore_index=ignore_index, sort=None
)
if len(query_compiler.columns) > 1:
return DataFrame(query_compiler=query_compiler)
else:
return Series(query_compiler=query_compiler)
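    # Usage sketch (illustrative):
    #
    #     >>> a = pd.Series([1, 2])
    #     >>> b = pd.Series([3, 4])
    #     >>> a.append(b, ignore_index=True).tolist()
    #     [1, 2, 3, 4]
    #
    # Appending a DataFrame (or a list containing one) returns a DataFrame
    # instead, matching the branches above.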
def aggregate(self, func=None, axis=0, *args, **kwargs):
def error_raiser(msg, exception):
"""Convert passed exception to the same type as pandas do and raise it."""
# HACK: to concord with pandas error types by replacing all of the
# TypeErrors to the AssertionErrors
exception = exception if exception is not TypeError else AssertionError
raise exception(msg)
self._validate_function(func, on_invalid=error_raiser)
return super(Series, self).aggregate(func, axis, *args, **kwargs)
agg = aggregate
def apply(
self, func, convert_dtype=True, args=(), **kwargs
): # noqa: PR01, RT01, D200
"""
Invoke function on values of Series.
"""
self._validate_function(func)
# apply and aggregate have slightly different behaviors, so we have to use
# each one separately to determine the correct return type. In the case of
# `agg`, the axis is set, but it is not required for the computation, so we use
# it to determine which function to run.
if kwargs.pop("axis", None) is not None:
apply_func = "agg"
else:
apply_func = "apply"
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
# We create a "dummy" `Series` to do the error checking and determining
# the return type.
try:
return_type = type(
getattr(pandas.Series("", index=self.index[:1]), apply_func)(
func, *args, **kwargs
)
).__name__
except Exception:
try:
return_type = type(
getattr(pandas.Series(0, index=self.index[:1]), apply_func)(
func, *args, **kwargs
)
).__name__
except Exception:
return_type = type(self).__name__
if (
isinstance(func, str)
or is_list_like(func)
or return_type not in ["DataFrame", "Series"]
):
result = super(Series, self).apply(func, *args, **kwargs)
else:
# handle ufuncs and lambdas
if kwargs or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwargs)
else:
f = func
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(self)
result = self.map(f)._query_compiler
if return_type not in ["DataFrame", "Series"]:
            # Sometimes the result is not a query compiler but a scalar (for
            # example, for reductions such as sum or count).
if isinstance(result, type(self._query_compiler)):
return result.to_pandas().squeeze()
else:
return result
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=result
)
if result.name == self.index[0]:
result.name = None
return result
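    # The return-type probe above boils down to roughly this sketch (an assumed
    # simplification): pandas itself is asked, on a one-row dummy Series, what
    # `func` would produce.
    #
    #     probe = pandas.Series("", index=self.index[:1])
    #     return_type = type(getattr(probe, apply_func)(func)).__name__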
def argmax(self, axis=None, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return int position of the largest value in the Series.
"""
result = self.idxmax(axis=axis, skipna=skipna, *args, **kwargs)
if np.isnan(result) or result is pandas.NA:
result = -1
return result
def argmin(self, axis=None, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return int position of the smallest value in the Series.
"""
result = self.idxmin(axis=axis, skipna=skipna, *args, **kwargs)
if np.isnan(result) or result is pandas.NA:
result = -1
return result
def argsort(self, axis=0, kind="quicksort", order=None): # noqa: PR01, RT01, D200
"""
Return the integer indices that would sort the Series values.
"""
return self._default_to_pandas(
pandas.Series.argsort, axis=axis, kind=kind, order=order
)
def autocorr(self, lag=1): # noqa: PR01, RT01, D200
"""
Compute the lag-N autocorrelation.
"""
return self.corr(self.shift(lag))
def between(self, left, right, inclusive="both"): # noqa: PR01, RT01, D200
"""
Return boolean Series equivalent to left <= series <= right.
"""
return self._default_to_pandas(
pandas.Series.between, left, right, inclusive=inclusive
)
def combine(self, other, func, fill_value=None): # noqa: PR01, RT01, D200
"""
Combine the Series with a Series or scalar according to `func`.
"""
return super(Series, self).combine(
other, lambda s1, s2: s1.combine(s2, func, fill_value=fill_value)
)
def compare(
self,
other: "Series",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
): # noqa: PR01, RT01, D200
"""
Compare to another Series and show the differences.
"""
if not isinstance(other, Series):
raise TypeError(f"Cannot compare Series to {type(other)}")
result = self.to_frame().compare(
other.to_frame(),
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
if align_axis == "columns" or align_axis == 1:
            # pandas.DataFrame.compare returns a DataFrame with a MultiIndex for
            # the columns, so we have to change the column object back.
result.columns = pandas.Index(["self", "other"])
else:
result = result.squeeze().rename(None)
return result
def corr(self, other, method="pearson", min_periods=None): # noqa: PR01, RT01, D200
"""
Compute correlation with `other` Series, excluding missing values.
"""
if method == "pearson":
this, other = self.align(other, join="inner", copy=False)
this = self.__constructor__(this)
other = self.__constructor__(other)
if len(this) == 0:
return np.nan
if len(this) != len(other):
raise ValueError("Operands must have same size")
if min_periods is None:
min_periods = 1
valid = this.notna() & other.notna()
if not valid.all():
this = this[valid]
other = other[valid]
if len(this) < min_periods:
return np.nan
this = this.astype(dtype="float64")
other = other.astype(dtype="float64")
this -= this.mean()
other -= other.mean()
other = other.__constructor__(query_compiler=other._query_compiler.conj())
result = this * other / (len(this) - 1)
result = np.array([result.sum()])
stddev_this = ((this * this) / (len(this) - 1)).sum()
stddev_other = ((other * other) / (len(other) - 1)).sum()
stddev_this = np.array([np.sqrt(stddev_this)])
stddev_other = np.array([np.sqrt(stddev_other)])
result /= stddev_this * stddev_other
np.clip(result.real, -1, 1, out=result.real)
if np.iscomplexobj(result):
np.clip(result.imag, -1, 1, out=result.imag)
return result[0]
return self.__constructor__(
query_compiler=self._query_compiler.default_to_pandas(
pandas.Series.corr,
other._query_compiler,
method=method,
min_periods=min_periods,
)
)
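    # The hand-rolled Pearson branch above effectively computes
    #
    #     r = sum((x - mean(x)) * conj(y - mean(y))) / ((n - 1) * std(x) * std(y))
    #
    # clipped to [-1, 1]. Quick illustrative check:
    #
    #     >>> pd.Series([1, 2, 3]).corr(pd.Series([1, 2, 3]))
    #     1.0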
def count(self, level=None): # noqa: PR01, RT01, D200
"""
Return number of non-NA/null observations in the Series.
"""
return super(Series, self).count(level=level)
def cov(
self, other, min_periods=None, ddof: Optional[int] = 1
): # noqa: PR01, RT01, D200
"""
Compute covariance with Series, excluding missing values.
"""
this, other = self.align(other, join="inner", copy=False)
this = self.__constructor__(this)
other = self.__constructor__(other)
if len(this) == 0:
return np.nan
if len(this) != len(other):
raise ValueError("Operands must have same size")
if min_periods is None:
min_periods = 1
valid = this.notna() & other.notna()
if not valid.all():
this = this[valid]
other = other[valid]
if len(this) < min_periods:
return np.nan
this = this.astype(dtype="float64")
other = other.astype(dtype="float64")
this -= this.mean()
other -= other.mean()
other = other.__constructor__(query_compiler=other._query_compiler.conj())
result = this * other / (len(this) - ddof)
result = result.sum()
return result
def describe(
self, percentiles=None, include=None, exclude=None, datetime_is_numeric=False
): # noqa: PR01, RT01, D200
"""
Generate descriptive statistics.
"""
# Pandas ignores the `include` and `exclude` for Series for some reason.
return super(Series, self).describe(
percentiles=percentiles, datetime_is_numeric=datetime_is_numeric
)
def diff(self, periods=1): # noqa: PR01, RT01, D200
"""
First discrete difference of element.
"""
return super(Series, self).diff(periods=periods, axis=0)
def divmod(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return Integer division and modulo of series and `other`, element-wise (binary operator `divmod`).
"""
return self._default_to_pandas(
pandas.Series.divmod, other, level=level, fill_value=fill_value, axis=axis
)
def dot(self, other): # noqa: PR01, RT01, D200
"""
Compute the dot product between the Series and the columns of `other`.
"""
if isinstance(other, BasePandasDataset):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindex(index=common)._query_compiler
if isinstance(other, Series):
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=True, squeeze_other=True
)
)
else:
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=True, squeeze_other=False
)
)
other = np.asarray(other)
if self.shape[0] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".format(self.shape, other.shape)
)
if len(other.shape) > 1:
return (
self._query_compiler.dot(other, squeeze_self=True).to_numpy().squeeze()
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=True)
)
def drop_duplicates(self, keep="first", inplace=False): # noqa: PR01, RT01, D200
"""
Return Series with duplicate values removed.
"""
return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
def dropna(self, axis=0, inplace=False, how=None): # noqa: PR01, RT01, D200
"""
Return a new Series with missing values removed.
"""
return super(Series, self).dropna(axis=axis, inplace=inplace)
def duplicated(self, keep="first"): # noqa: PR01, RT01, D200
"""
Indicate duplicate Series values.
"""
return self.to_frame().duplicated(keep=keep)
def eq(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return Equal to of series and `other`, element-wise (binary operator `eq`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).eq(new_other, level=level, axis=axis)
def equals(self, other): # noqa: PR01, RT01, D200
"""
Test whether two objects contain the same elements.
"""
return (
self.name == other.name
and self.index.equals(other.index)
and self.eq(other).all()
)
def explode(self, ignore_index: bool = False): # noqa: PR01, RT01, D200
"""
Transform each element of a list-like to a row.
"""
return self._default_to_pandas(pandas.Series.explode, ignore_index=ignore_index)
def factorize(self, sort=False, na_sentinel=-1): # noqa: PR01, RT01, D200
"""
Encode the object as an enumerated type or categorical variable.
"""
return self._default_to_pandas(
pandas.Series.factorize, sort=sort, na_sentinel=na_sentinel
)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
): # noqa: PR01, RT01, D200
"""
Fill NaNs inside of a Series object.
"""
if isinstance(value, BasePandasDataset) and not isinstance(value, Series):
raise TypeError(
'"value" parameter must be a scalar, dict or Series, but '
'you passed a "{0}"'.format(type(value).__name__)
)
return super(Series, self)._fillna(
squeeze_self=True,
squeeze_value=isinstance(value, Series),
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def floordiv(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
        Return integer division of series and `other`, element-wise (binary operator `floordiv`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).floordiv(
new_other, level=level, fill_value=None, axis=axis
)
def ge(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return greater than or equal to of series and `other`, element-wise (binary operator `ge`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).ge(new_other, level=level, axis=axis)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
dropna: bool = True,
): # noqa: PR01, RT01, D200
"""
Group Series using a mapper or by a Series of columns.
"""
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
from .groupby import SeriesGroupBy
if not as_index:
raise TypeError("as_index=False only valid with DataFrame")
# SeriesGroupBy expects a query compiler object if it is available
if isinstance(by, Series):
by = by._query_compiler
elif callable(by):
by = by(self.index)
elif by is None and level is None:
raise TypeError("You have to supply one of 'by' and 'level'")
return SeriesGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name=None,
observed=observed,
drop=False,
dropna=dropna,
)
def gt(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return greater than of series and `other`, element-wise (binary operator `gt`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).gt(new_other, level=level, axis=axis)
def hist(
self,
by=None,
ax=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
figsize=None,
bins=10,
**kwds,
): # noqa: PR01, RT01, D200
"""
Draw histogram of the input series using matplotlib.
"""
return self._default_to_pandas(
pandas.Series.hist,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
**kwds,
)
def idxmax(self, axis=0, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return the row label of the maximum value.
"""
if skipna is None:
skipna = True
return super(Series, self).idxmax(axis=axis, skipna=skipna, *args, **kwargs)
def idxmin(self, axis=0, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return the row label of the minimum value.
"""
if skipna is None:
skipna = True
return super(Series, self).idxmin(axis=axis, skipna=skipna, *args, **kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=None,
**kwargs,
): # noqa: PR01, RT01, D200
"""
Fill NaN values using an interpolation method.
"""
return self._default_to_pandas(
pandas.Series.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def item(self): # noqa: RT01, D200
"""
Return the first element of the underlying data as a Python scalar.
"""
return self[0]
def items(self): # noqa: D200
"""
Lazily iterate over (index, value) tuples.
"""
def item_builder(s):
return s.name, s.squeeze()
partition_iterator = PartitionIterator(self.to_frame(), 0, item_builder)
for v in partition_iterator:
yield v
def iteritems(self): # noqa: RT01, D200
"""
Lazily iterate over (index, value) tuples.
"""
return self.items()
def keys(self): # noqa: RT01, D200
"""
Return alias for index.
"""
return self.index
def kurt(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
): # noqa: PR01, RT01, D200
"""
Return unbiased kurtosis over requested axis.
"""
axis = self._get_axis_number(axis)
if numeric_only is True:
raise NotImplementedError("Series.kurt does not implement numeric_only.")
return super(Series, self).kurt(axis, skipna, level, numeric_only, **kwargs)
kurtosis = kurt
def le(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return less than or equal to of series and `other`, element-wise (binary operator `le`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).le(new_other, level=level, axis=axis)
def lt(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return less than of series and `other`, element-wise (binary operator `lt`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).lt(new_other, level=level, axis=axis)
def map(self, arg, na_action=None): # noqa: PR01, RT01, D200
"""
Map values of Series according to input correspondence.
"""
if not callable(arg) and hasattr(arg, "get"):
mapper = arg
def arg(s):
return mapper.get(s, np.nan)
return self.__constructor__(
query_compiler=self._query_compiler.applymap(
lambda s: arg(s)
if pandas.isnull(s) is not True or na_action is None
else s
)
)
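    # Usage sketch (illustrative):
    #
    #     >>> pd.Series([1, 2, 3]).map({1: "a", 2: "b"}).tolist()
    #     ['a', 'b', nan]
    #
    # With na_action="ignore", missing values are passed through untouched
    # instead of being handed to `arg`.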
def memory_usage(self, index=True, deep=False): # noqa: PR01, RT01, D200
"""
Return the memory usage of the Series.
"""
if index:
result = self._reduce_dimension(
self._query_compiler.memory_usage(index=False, deep=deep)
)
index_value = self.index.memory_usage(deep=deep)
return result + index_value
return super(Series, self).memory_usage(index=index, deep=deep)
def mod(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return Modulo of series and `other`, element-wise (binary operator `mod`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).mod(
new_other, level=level, fill_value=None, axis=axis
)
def mode(self, dropna=True): # noqa: PR01, RT01, D200
"""
Return the mode(s) of the Series.
"""
return super(Series, self).mode(numeric_only=False, dropna=dropna)
def mul(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return multiplication of series and `other`, element-wise (binary operator `mul`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).mul(
new_other, level=level, fill_value=None, axis=axis
)
multiply = rmul = mul
def ne(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return not equal to of series and `other`, element-wise (binary operator `ne`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).ne(new_other, level=level, axis=axis)
def nlargest(self, n=5, keep="first"): # noqa: PR01, RT01, D200
"""
Return the largest `n` elements.
"""
return self._default_to_pandas(pandas.Series.nlargest, n=n, keep=keep)
def nsmallest(self, n=5, keep="first"): # noqa: PR01, RT01, D200
"""
Return the smallest `n` elements.
"""
return Series(query_compiler=self._query_compiler.nsmallest(n=n, keep=keep))
def slice_shift(self, periods=1, axis=0): # noqa: PR01, RT01, D200
"""
Equivalent to `shift` without copying data.
"""
if periods == 0:
return self.copy()
if axis == "index" or axis == 0:
if abs(periods) >= len(self.index):
return Series(dtype=self.dtype)
else:
new_df = self.iloc[:-periods] if periods > 0 else self.iloc[-periods:]
new_df.index = (
self.index[periods:] if periods > 0 else self.index[:periods]
)
return new_df
else:
raise ValueError(
"No axis named {axis} for object type {type}".format(
axis=axis, type=type(self)
)
)
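    # Usage sketch (illustrative): `slice_shift(2)` drops the last two values and
    # re-labels the remainder with the shifted index positions.
    #
    #     >>> pd.Series([1, 2, 3, 4]).slice_shift(2).tolist()
    #     [1, 2]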
def shift(
self, periods=1, freq=None, axis=0, fill_value=None
): # noqa: PR01, RT01, D200
"""
Shift index by desired number of periods with an optional time `freq`.
"""
return super(type(self), self).shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def unstack(self, level=-1, fill_value=None): # noqa: PR01, RT01, D200
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
"""
from .dataframe import DataFrame
result = DataFrame(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
return result.droplevel(0, axis=1) if result.columns.nlevels > 1 else result
@property
def plot(
self,
kind="line",
ax=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False,
**kwds,
): # noqa: PR01, RT01, D200
"""
Make plot of Series.
"""
return self._to_pandas().plot
def pow(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return exponential power of series and `other`, element-wise (binary operator `pow`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).pow(
new_other, level=level, fill_value=None, axis=axis
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
): # noqa: PR01, RT01, D200
"""
Return the product of the values over the requested `axis`.
"""
axis = self._get_axis_number(axis)
if skipna is None:
skipna = True
if level is not None:
if (
not self._query_compiler.has_multiindex(axis=axis)
and level > 0
or level < -1
and level != self.index.name
):
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
return self.groupby(level=level, axis=axis, sort=False).prod(
numeric_only=numeric_only, min_count=min_count, **kwargs
)
if numeric_only:
raise NotImplementedError(
f"Series.{self.name} does not implement numeric_only."
)
new_index = self.columns if axis else self.index
if min_count > len(new_index):
return np.nan
data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
if min_count > 1:
return data._reduce_dimension(
data._query_compiler.prod_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
)
product = prod
radd = add
def ravel(self, order="C"): # noqa: PR01, RT01, D200
"""
Return the flattened underlying data as an ndarray.
"""
data = self._query_compiler.to_numpy().flatten(order=order)
if isinstance(self.dtype, pandas.CategoricalDtype):
data = pandas.Categorical(data, dtype=self.dtype)
return data
def reindex(self, index=None, **kwargs): # noqa: PR01, RT01, D200
"""
Conform Series to new index with optional filling logic.
"""
method = kwargs.pop("method", None)
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
return super(Series, self).reindex(
index=index,
method=method,
level=level,
copy=copy,
limit=limit,
tolerance=tolerance,
fill_value=fill_value,
)
def rename(
self,
index=None,
*,
axis=None,
copy=True,
inplace=False,
level=None,
errors="ignore",
): # noqa: PR01, RT01, D200
"""
Alter Series index labels or name.
"""
non_mapping = is_scalar(index) or (
            is_list_like(index)
        )
import asyncio
import logging
import json
from glob import glob
from pathlib import Path
from dataclasses import dataclass, field, asdict
from typing import Dict, List
import pandas as pd
from magda.pipeline.parallel import init
from ki67.pipelines.config import ConfigPipeline
from ki67.common import Request
logging.basicConfig(
level=logging.INFO
)
CONFIG_DIR = Path('ki67/pipelines/configs')
SHARDS = ['amy', 'ben', 'charlie', 'ensemble']
SIZES = [48, 96, 192]
thresholds_file = Path('data/experiments/thresholds.csv')
THRESHOLDS = (
    pd.read_csv('data/experiments/thresholds.csv', index_col=0)
)
from ast import literal_eval
from os import listdir
from os.path import isfile, join
from scipy.sparse import save_npz, load_npz
import numpy as np
import os
import pandas as pd
import pickle
import stat
import yaml
def save_dataframe_csv(df, path, name):
df.to_csv(path+name, index=False)
def load_dataframe_csv(path, name, index_col=None):
return pd.read_csv(path+name, index_col=index_col)
def save_dataframe_latex(df, path, model):
with open('{0}{1}_parameter_tuning.tex'.format(path, model), 'w') as handle:
handle.write(df.to_latex(index=False))
def save_numpy(matrix, path, model):
save_npz('{0}{1}'.format(path, model), matrix)
def load_numpy(path, name):
return load_npz(path+name).tocsr()
def save_pickle(path, name, data):
with open('{0}/{1}.pickle'.format(path, name), 'wb') as handle:
pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_pickle(path, name):
with open('{0}/{1}.pickle'.format(path, name), 'rb') as handle:
data = pickle.load(handle)
return data
def load_yaml(path, key='parameters'):
with open(path, 'r') as stream:
try:
            return yaml.load(stream, Loader=yaml.FullLoader)[key]
except yaml.YAMLError as exc:
print(exc)
def find_best_hyperparameters(folder_path, metric):
    csv_files = [join(folder_path, f) for f in listdir(folder_path)
                 if isfile(join(folder_path, f)) and f.endswith('.csv')]
    print(folder_path)
    best_settings = []
    for record in csv_files:
        df = pd.read_csv(record)
        df[metric+'_Score'] = df[metric].map(lambda x: literal_eval(x)[0])
        best_settings.append(df.loc[df[metric+'_Score'].idxmax()].to_dict())
    df = pd.DataFrame(best_settings)
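# Usage sketch (illustrative, assuming each CSV in the folder stores one tuning
# run per row and a column such as 'NDCG' holding stringified tuples like
# "(0.42, ...)"):
#
#     find_best_hyperparameters('tables/tuning/', 'NDCG')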
import numpy as np
import pandas as pd
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_non_participants,
)
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_one_strict_assort_by_group,
)
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_participants,
)
from src.create_initial_states.make_educ_group_columns import _determine_group_sizes
from src.create_initial_states.make_educ_group_columns import _get_id_to_weak_group
from src.create_initial_states.make_educ_group_columns import (
_get_key_with_longest_value,
)
from src.create_initial_states.make_educ_group_columns import (
_get_key_with_shortest_value,
)
from src.create_initial_states.make_educ_group_columns import _split_data_by_query
def test_get_id_to_weak_group():
raw_id = pd.Series([2, 2, 3, 3, 4, 4, 5, 5]) # dtype int is right.
participants = pd.DataFrame(index=[2, 3, 4, 5])
participants["__weak_group_id"] = [0, 1] + [1, 0]
expected = pd.Series([0, 1], index=[3, 4])
res = _get_id_to_weak_group(participants, raw_id)
pd.testing.assert_series_equal(res, expected, check_names=False)
def test_split_data_by_query():
df = pd.DataFrame(index=list("abcde"))
df["to_select"] = [True, True, False, True, False]
query = "to_select"
res_selected, res_others = _split_data_by_query(df, query)
expected_selected = df.loc[["a", "b", "d"]]
expected_other = df.loc[["c", "e"]]
pd.testing.assert_frame_equal(res_selected, expected_selected)
pd.testing.assert_frame_equal(res_others, expected_other)
def test_create_group_id_for_participants():
df = pd.DataFrame()
df["state"] = ["BY"] * 4 + ["NRW"] * 8
df["county"] = ["N", "N", "M", "M"] + ["K"] * 5 + ["D"] * 3
group_size = 2
strict_assort_by = "state"
weak_assort_by = "county"
res = _create_group_id_for_participants(
df=df,
group_size=group_size,
strict_assort_by=strict_assort_by,
weak_assort_by=weak_assort_by,
)
expected = pd.Series(
[2, 2, 1, 1, 4, 4, 6, 6, 7, 5, 5, 7], dtype=float, name="group_id"
)
pd.testing.assert_series_equal(res, expected)
def test_create_group_id_for_one_strict_assort_by_group_one_county_size_one():
df = pd.DataFrame()
df["weak_assort_by"] = ["a", "a", "a", "a"]
group_size = 1
weak_assort_by = "weak_assort_by"
start_id = 20
res, end_id = _create_group_id_for_one_strict_assort_by_group(
df=df, group_size=group_size, weak_assort_by=weak_assort_by, start_id=start_id
)
    expected = pd.Series([20.0, 21.0, 22.0, 23.0], index=df.index, name="group_id")
import os
from glob import glob
import zipfile
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# -*- coding: utf-8 -*-
"""
Tests for abagen.samples module
"""
import numpy as np
import pandas as pd
import pytest
from abagen import samples
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# generate fake data (based largely on real data) so we know what to expect #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@pytest.fixture(scope='module')
def ontology():
""" Fake ontology dataframe
"""
sid = [4251, 4260, 4322, 4323, 9422]
hemi = ['L', 'R', 'L', 'R', np.nan]
acronym = ['S', 'S', 'Cl', 'Cl', 'CC']
path = [
'/4005/4006/4007/4008/4219/4249/12896/4251/',
'/4005/4006/4007/4008/4219/4249/12896/4260/',
'/4005/4006/4007/4275/4321/4322/',
'/4005/4006/4007/4275/4321/4323/',
'/4005/9352/9418/9422/',
]
name = [
'subiculum, left',
'subiculum, right',
'claustrum, left',
'claustrum, right',
'central canal',
]
return pd.DataFrame(dict(id=sid, hemisphere=hemi, name=name,
acronym=acronym, structure_id_path=path))
@pytest.fixture(scope='module')
def mm_annotation():
""" Fake annotation dataframe with some samples mislabelled
"""
mni_x = [-10, -20, 30, 40, 0]
sid = [4251, 4323, 4323, 4251, 9422]
sacr = ['S', 'Cl', 'Cl', 'S', 'CC']
sname = [
'subiculum, left',
'claustrum, right',
'claustrum, right',
'subiculum, left',
'central canal'
]
ind = pd.Series(range(len(sid)), name='sample_id')
return pd.DataFrame(dict(mni_x=mni_x, structure_id=sid,
structure_acronym=sacr, structure_name=sname),
index=ind)
@pytest.fixture(scope='module')
def annotation(mm_annotation):
""" Fake annotation dataframe
"""
out = mm_annotation.loc[[0, 2, 4]].reset_index(drop=True)
out.index.name = 'sample_id'
return out
@pytest.fixture(scope='module')
def microarray():
""" Fake microarray dataframe
"""
data = np.arange(9).reshape(3, 3)
cols = pd.Series(range(3), name='sample_id')
ind = pd.Series([1058685, 1058684, 1058683], name='probe_id')
return pd.DataFrame(data, columns=cols, index=ind)
@pytest.fixture(scope='module')
def pacall(microarray):
""" Fake PACall dataframe
"""
data = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
return pd.DataFrame(data, columns=microarray.columns,
index=microarray.index)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# test all the functions on our generated fake data so we know what to expect #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_update_mni_coords():
# xyz coordinates are getting replaced so who cares about the original
# but ids are important and need to be real!
x = y = z = [-10, 20, 30, 40]
ids = [594, 2985, 1058, 1145]
annotation = pd.DataFrame(dict(mni_x=x, mni_y=y, mni_z=z, well_id=ids))
out = samples.update_mni_coords(annotation)
# confirm that no samples were lost / reordered during the update process
# and that we still have all our columns
assert np.all(out['well_id'] == annotation['well_id'])
assert np.all(out.columns == annotation.columns)
# but DO confirm that _something_ changes about the dataframes (i.e., our
# bogus coordinates should be different)
with pytest.raises(AssertionError):
pd.testing.assert_frame_equal(out, annotation)
assert np.all(out['mni_x'] != annotation['mni_x'])
assert np.all(out['mni_y'] != annotation['mni_y'])
assert np.all(out['mni_z'] != annotation['mni_z'])
# if we provide invalid well_ids we should receive an error!
annotation['well_id'] = [594, 2985, 1058, 99999999999]
with pytest.raises(KeyError):
samples.update_mni_coords(annotation)
@pytest.mark.parametrize('path, expected', [
('/4005/4006/4007/4275/4276/4277/4278/12899/4286/', 'subcortex'),
('/4005/4006/4007/4275/4327/4341/4342/4344/', 'subcortex'),
('/4005/4006/4007/4008/4084/4103/4111/4112/4113/', 'cortex'),
('/4005/4006/4833/4696/4697/12930/12931/12933/4751/', 'cerebellum'),
('/4005/4006/9512/9676/9677/9680/9681/', 'brainstem'),
('/4005/4006/4833/9131/9132/9133/9492/', 'brainstem'),
('/4005/9218/9298/12959/265505622/', 'white matter'),
('/4005/9218/9219/9220/9227/', 'white matter'),
('/4005/9352/9418/9419/9708/', 'other'),
('/4005/9352/9353/9400/9402/', 'other'),
('/4005/4006/4833', None),
('thisisnotapath', None), # TODO: should this error?
])
def test__get_struct(path, expected):
out = samples._get_struct(path)
assert out == expected if expected is not None else out is None
def test_drop_mismatch_samples(mm_annotation, ontology):
# here's what we expect (i.e., indices 1 & 3 are dropped and the structure
# for the remaining samples is correctly extracted from the paths)
expected = pd.DataFrame(dict(hemisphere=['L', 'R', np.nan],
mni_x=[-10, 30, 0],
structure_acronym=['S', 'Cl', 'CC'],
structure=['subcortex', 'subcortex', 'other'],
structure_id=[4251, 4323, 9422],
structure_name=['subiculum, left',
'claustrum, right',
'central canal']),
index=[0, 2, 4])
# do we get what we expect? (ignore ordering of columns / index)
out = samples.drop_mismatch_samples(mm_annotation, ontology)
    pd.testing.assert_frame_equal(out, expected, check_like=True)
import re
import numpy as np
import pandas as pd
import ops.filenames
from ops.constants import *
def parse_czi_export(f):
pat = '.*_s(\d+)c(\d+)m(\d+)_ORG.tif'
scene, channel, m = re.findall(pat, f)[0]
return {WELL: int(scene), CHANNEL: int(channel), SITE: int(m) - 1}
def make_czi_file_table(files, wells):
"""Provide list of wells to map from czi scenes to well names.
"""
wells = np.array(wells)
df1 = pd.DataFrame([parse_czi_export(f) for f in files])
df2 = (pd.DataFrame([ops.filenames.parse_filename(f) for f in files])
.drop(WELL, axis=1))
    return pd.concat([df1, df2], axis=1)
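# Example (illustrative): for a file named 'plate1_s01c2m03_ORG.tif' the regex in
# parse_czi_export yields scene '01', channel '2', site '03', i.e. a mapping of
# {WELL: 1, CHANNEL: 2, SITE: 2} (SITE is zero-based).
#
#     parse_czi_export('plate1_s01c2m03_ORG.tif')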
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 30 12:49:13 2019
@author: andre
"""
import pandas as pd
import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import plot_tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from itertools import combinations
from pandas.plotting import scatter_matrix
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.covariance import EmpiricalCovariance
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from matplotlib import cm as color_maps
from numpy import set_printoptions
import timeit
import pathlib
import os
import matplotlib
matplotlib.use('Agg')
__author__ = "<NAME>, <NAME>"
__copyright__ = "None"
__license__ = "License: BSD 3 clause"
__version__ = "0.2"
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
def gen_dataset():
num_samples = 500
base_time = datetime.datetime(2019, 1, 1)
time_series = np.array([base_time + (datetime.timedelta(seconds=i))/10 for i in range(num_samples)])
altitude = np.random.normal(scale = 1.0, size = num_samples)*2000 + 10000
for i, element in enumerate(altitude):
altitude[i] = element * (-np.cos(2*np.pi*i/num_samples) + 1)
temperature = (altitude.max() - altitude)/1000 + np.random.poisson(5, size = num_samples)
humidity = temperature + np.random.exponential(scale = 1, size = num_samples)*10
pressure = abs(((temperature / (32 + altitude)) + np.random.pareto(5, size = num_samples))**3)
pitch = np.random.normal(scale = 1.0, size = num_samples)
roll = np.random.normal(scale = 1.0, size = num_samples)
yaw = np.random.normal(scale = 1.0, size = num_samples)
latitude = np.random.normal(scale = 1.0, size = num_samples) + 30
longitude = np.random.normal(scale = 1.0, size = num_samples) - 180
#########################################
# CLASSIFY DATA #
#########################################
fail_conditions = 'True Fail Conditions:\n'
pressure_mod = pressure - np.percentile(pressure, 75)
fail_conditions += f'Pressure > {np.percentile(pressure, 75)}\n'
temperature_mod = temperature - np.percentile(temperature, 70)
    fail_conditions += f'Temperature > {np.percentile(temperature, 70)}'
classification = []
for _pressure, _temperature in zip(pressure_mod, temperature_mod):
if (_pressure > 0) | (_temperature > 0):
classification.append(0)
else:
classification.append(1)
# classification = pd.Series(classification, dtype="category")
class_names = ['Fail', 'Pass']
data = {
'time':time_series,
'temperature':temperature,
'humidity':humidity,
'altitude':altitude,
'pressure':pressure,
# 'pitch' : pitch,
# 'roll' : roll,
# 'yaw' : yaw,
# 'latitude' : latitude,
# 'longitude' : longitude,
'classification': classification
}
feature_names = list(data.keys())
feature_names.remove('time')
feature_names.remove('classification')
df = pd.DataFrame(data)
return (df, feature_names)
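# Usage sketch (illustrative):
#
#     df, feature_names = gen_dataset()
#     df.shape          # (500, 6): time, four features, classification
#     feature_names     # ['temperature', 'humidity', 'altitude', 'pressure']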
def plot_raw_features(df, feature_names):
fig = plt.figure()
ncols = np.ceil(len(feature_names)/2)
for i, feature in enumerate(feature_names):
plt.subplot(2, ncols, i + 1)
plt.plot(df['time'], df[feature])
plt.xticks(rotation=45)
plt.xlabel('time')
plt.ylabel(feature)
plt.suptitle('Raw Features')
plt.close()
return fig
def plot_corr_mat(df, feature_names):
corr = df[feature_names].corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cmap = plt.cm.RdBu
cax = ax.matshow(corr, vmin=-1, vmax=1, cmap = cmap)
plt.title('Feature Correlation')
labels = feature_names.copy()
labels.insert(0, '')
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
fig.colorbar(cax)
plt.close()
return fig
def plot_feature_box(df, feature_names):
ax = df[feature_names].plot(kind='box')
fig = ax.get_figure()
plt.suptitle('Box & Whisker Plot')
plt.close()
return fig
def plot_scatter_matrix(df, feature_names):
plt.style.use('default')
scatter_matrix(df[feature_names], diagonal='kde')
plt.suptitle('Scatter Matrix')
return plt.gcf()
def plot_histograms(df, feature_names):
df[feature_names].hist(bins = 20)
plt.suptitle('Histograms')
return plt.gcf()
def build_models(df, feature_names, algorithims_to_use):
algorithims = []
if 'adaboost' in algorithims_to_use:
algorithims.append(("AdaBoost", AdaBoostClassifier()))
if 'dtc' in algorithims_to_use:
algorithims.append(("Decision Tree", DecisionTreeClassifier(max_depth=5)))
if 'gaussian_process' in algorithims_to_use:
algorithims.append(("Gaussian Process", GaussianProcessClassifier(1.0 * RBF(1.0))))
if 'linear_svm' in algorithims_to_use:
algorithims.append(("Linear SVM", SVC(kernel="linear", C=0.025)))
if 'naive_bayes' in algorithims_to_use:
algorithims.append(("Naive Bayes", GaussianNB()))
if 'nearest_neighbors' in algorithims_to_use:
algorithims.append(("Nearest Neighbors", KNeighborsClassifier(3)))
if 'neural_network' in algorithims_to_use:
algorithims.append(("Neural Net", MLPClassifier(alpha=1, max_iter=1000)))
if 'qda' in algorithims_to_use:
algorithims.append(("QDA", QuadraticDiscriminantAnalysis()))
if 'random_forest' in algorithims_to_use:
algorithims.append(("Random Forest", RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)))
if 'rbf_svm' in algorithims_to_use:
algorithims.append(("RBF SVM", SVC(gamma=2, C=1)))
# create feature union
# features = []
# features.append(('pca', PCA(n_components='mle')))
# features.append(('select_best', SelectKBest(k=2)))
# feature_union = FeatureUnion(features)
estimators = []
# estimators.append(('feature_union', feature_union))
# estimators.append(('pca', PCA(n_components = 'mle')))
estimators.append(('standardize', StandardScaler()))
models = []
for algorithim in algorithims:
models.append(Pipeline(estimators + [algorithim]))
return models
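# Usage sketch (illustrative): each entry is a scikit-learn Pipeline that
# standardizes the features before the named classifier.
#
#     models = build_models(df, feature_names, ['naive_bayes', 'rbf_svm'])
#     models[0].fit(df[feature_names], df['classification'])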
def plot_algorithim_accuracy(df, feature_names, models):
# Split-out validation dataset
X = df[feature_names]
Y = df['classification']
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
scoring = 'accuracy'
# evaluate each model in turn
results = []
names = []
for model in models:
name = model.steps[-1][0]
kfold = model_selection.KFold(n_splits=10, shuffle = True, random_state=seed)
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
# Compare Algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
ax.set_xticklabels(names, rotation=40, ha='right')
return fig
def plot_learning_curve_(df, feature_names, model):
name = model.steps[-1][0]
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
title = f'Learning Curve\n{name}'
fig = plot_learning_curve(model, title, df[feature_names], df['classification'],
ylim=(0.7, 1.01), cv=cv, n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5))
return fig
def plot_algorithim_class_space(df, feature_names, clf):
coutnour_step_size = 200
comb = list(combinations(feature_names, 2))
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
name = clf.steps[-1][0]
individual_figure = plt.figure(figsize=(16, 10))
individual_figure.suptitle(f'Classification Space of {name}')
for pairidx, pair in enumerate(comb):
# We only take the two corresponding features
X = df[list(pair)]
y = df['classification']
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
#set plot step so there are 200 steps per axis
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
x_step = (x_max - x_min)/coutnour_step_size
y_step = (y_max - y_min)/coutnour_step_size
xx, yy = np.meshgrid(np.arange(x_min, x_max, x_step),
np.arange(y_min, y_max, y_step))
# just plot the dataset first
individual_ax = individual_figure.add_subplot(2, np.ceil(len(comb)/2), pairidx + 1)
individual_ax.set_xlabel(pair[0])
individual_ax.set_ylabel(pair[1])
individual_ax.set_xlim(xx.min(), xx.max())
individual_ax.set_ylim(yy.min(), yy.max())
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
individual_ax.contourf(xx, yy, Z, cmap=cm, alpha = 1)
# Plot the training points
individual_ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
individual_ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
individual_ax.text(0.97, 0.03, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right', bbox=dict(facecolor = 'white', alpha=0.5), transform=individual_ax.transAxes)
# #decision tree plot
# clf = DecisionTreeClassifier().fit(df[feature_names], df['classification'])
# plt.style.use('default')
# plt.figure(figsize = [16, 16])
# ###TODO this plot is broken. pdf? GraphViz?
# plot_tree(clf, filled=True, class_names = class_names, feature_names = feature_names)
# plt.annotate(fail_conditions, xy=(0, 1))
return individual_figure
def main():
#########################################
# INPUTS #
#########################################
main_start_time = timeit.default_timer()
save_results = True
do_combined_plot = True # very expensive with num_samples > 1000
do_learning_curve = False
img_dpi = 600
plt.style.use('default')
num_samples = 500
coutnour_step_size = 200
set_printoptions(precision=2)
save_path_root = pathlib.Path.cwd() / 'Artifacts' / 'Pipelined'
save_path_root.mkdir(parents=True, exist_ok=True)
report_file_path = save_path_root / 'Performance_Report.txt'
with open(report_file_path, 'w', encoding='utf-8') as fh:
fh.write('MACHINE LEARNING ALGORITHIM PERFORMANCE REPORT\n' \
f'REPORT DATE: {datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")}\n' \
f'File: {__file__}\n' \
f'Author: {__author__}\n' \
f'Copyright: {__copyright__}\n' \
f'License: {__license__}\n' \
f'Version: {__version__}\n' \
f'Maintainer: {__maintainer__}\n' \
f'E-Mail: {__email__}\n' \
f'Status: {__status__}\n\n')
if save_results:
print(f'Results will be saved in the following directory\n{save_path_root}')
# plt.style.use('ggplot')
#########################################
# GENERATE DATASET #
#########################################
base_time = datetime.datetime(2019, 1, 1)
time_series = np.array([base_time + (datetime.timedelta(seconds=i))/10 for i in range(num_samples)])
altitude = np.random.normal(scale = 1.0, size = num_samples)*2000 + 10000
for i, element in enumerate(altitude):
altitude[i] = element * (-np.cos(2*np.pi*i/num_samples) + 1)
temperature = (altitude.max() - altitude)/1000 + np.random.poisson(5, size = num_samples)
humidity = temperature + np.random.exponential(scale = 1, size = num_samples)*10
pressure = abs(((temperature / (32 + altitude)) + np.random.pareto(5, size = num_samples))**3)
pitch = np.random.normal(scale = 1.0, size = num_samples)
roll = np.random.normal(scale = 1.0, size = num_samples)
yaw = np.random.normal(scale = 1.0, size = num_samples)
latitude = np.random.normal(scale = 1.0, size = num_samples) + 30
longitude = np.random.normal(scale = 1.0, size = num_samples) - 180
#########################################
# CLASSIFY DATA #
#########################################
fail_conditions = 'True Fail Conditions:\n'
pressure_mod = pressure - np.percentile(pressure, 75)
fail_conditions += f'Pressure > {np.percentile(pressure, 75)}\n'
temperature_mod = temperature - np.percentile(temperature, 70)
    fail_conditions += f'Temperature > {np.percentile(temperature, 70)}'
classification = []
for _pressure, _temperature in zip(pressure_mod, temperature_mod):
if (_pressure > 0) | (_temperature > 0):
classification.append(0)
else:
classification.append(1)
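    # With the thresholds above, a sample is labelled 0 ('Fail') whenever its pressure
    # exceeds the 75th pressure percentile or its temperature exceeds the 70th
    # temperature percentile, and 1 ('Pass') otherwise, so roughly a third or more of
    # the synthetic samples end up in the Fail class.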
# classification = pd.Series(classification, dtype="category")
class_names = ['Fail', 'Pass']
data = {
'time':time_series,
'temperature':temperature,
'humidity':humidity,
'altitude':altitude,
'pressure':pressure,
'pitch' : pitch,
'roll' : roll,
'yaw' : yaw,
'latitude' : latitude,
'longitude' : longitude,
'classification': classification
}
feature_names = list(data.keys())
feature_names.remove('time')
feature_names.remove('classification')
    df = pd.DataFrame(data)
#!/usr/bin/env python
import os
import glob
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from .utils import fracPoissonErrors
__all__ = ['Sim', 'CompareSims']
class Sim(object):
"""
Class to describe Simulation with Mass Functions
Parameters
----------
simulationDir : string, mandatory
Directory in which Mass Function Results calculated by HaloPy are
stored
name : A Name given to the particular simulation
"""
def __init__(self, simulationDir, name, filelist=None):
self.simDir = simulationDir
self.name = name
if filelist is None:
print(self.simDir)
filelist = glob.glob(self.simDir + '/*_mf')
self.filelist = filelist
def mf_fname(self, stepnum):
filelist = self.filelist
#print filelist
        ind = [str(stepnum) in fname for fname in filelist].index(True)
fname = filelist[ind]
return fname
def mf_results(self, stepnum):
"""
Parameters
----------
stepnum :
"""
fname = self.mf_fname(stepnum)
names = ['FOF_Mass', 'numClusters', 'dndlnM', 'frac_Err', 'oneoversigma', 'f(sigma)', 'fsigma_fit', 'dn/dln_Mfit']
df = pd.DataFrame(np.loadtxt(fname)[:, :-1], columns=names, index=None)
return df
def massrange(self, stepnum):
df = self.mf_results(stepnum)
return min(df.FOF_Mass.values), max(df.FOF_Mass.values)
def interpolated_MF(self, stepnum):
df = self.mf_results(stepnum)
mass, massfn = df['FOF_Mass'].values, df['dndlnM']
return interp1d(np.log(mass), np.log(massfn))
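# Minimal usage sketch for Sim (hypothetical directory and name, kept as comments
# so nothing runs on import):
#   sim = Sim('/path/to/halopy_output', name='fiducial')
#   df = sim.mf_results(499)          # mass-function table for one snapshot
#   lo, hi = sim.massrange(499)       # FOF mass range covered by that table
#   mf = sim.interpolated_MF(499)     # log-log interpolator; evaluate as np.exp(mf(np.log(M)))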
class CompareSims(object):
def __init__(self, Sim1, Sim2, steps=[499, 247, 163]):
self.sim1 = Sim1
self.sim2 = Sim2
self.sim1name = Sim1.name
self.sim2name = Sim2.name
self.steps = steps
#print (steps)
@property
def comparisons(self):
steps = self.steps
df = self.calcSuppression(steps[0])
for step in steps[1:]:
df = df.append(self.calcSuppression(step))
return df
def calcSuppression(self, stepnum):
df1 = self.sim1.mf_results(stepnum)
df2 = self.sim2.mf_results(stepnum)
mass1 = df1.FOF_Mass.values
mass2 = df2.FOF_Mass.values
mf1 = df1.dndlnM.values
mf2 = df2.dndlnM.values
numClusters2 = df2.numClusters
# Note interpolation is not possible if mass2 is outside the range
# of mass1. So return np.nan values
mf2interp = np.empty(len(mass1))
mf2interp[:] = np.nan
        mask = (mass1 > min(mass2)) & (mass1 < max(mass2))
# Interpolation done in log space
mf2interp[mask] = np.exp(self.sim2.interpolated_MF(stepnum)(np.log(mass1[mask])))
names = [self.sim1name + '_mass',
self.sim1name + '_MF',
self.sim2name + '_mass',
self.sim2name + '_MF',
self.sim2name + '_InterpMF']
dfdict = dict()
dfdict[names[0]] = mass1
dfdict[names[1]] = mf1
dfdict[names[2]] = mass2
dfdict[names[3]] = mf2
dfdict[names[4]] = mf2interp
dfdict['numClusters1'] = df1.numClusters.values
dfdict['numClusters2'] = df2.numClusters.values
fracErrors1 = fracPoissonErrors(dfdict['numClusters1'])
fracErrors2 = fracPoissonErrors(dfdict['numClusters2'])
        combinederr = mf2interp * np.sqrt(fracErrors1**2 + fracErrors2**2) / mf1
dfdict['directRatio'] = mf2 / mf1
dfdict['interpolatedRatio'] = mf2interp / mf1
dfdict['ApproxErrorLow'] = combinederr[0, :]
dfdict['ApproxErrorHigh'] = combinederr[1, :]
# for key in dfdict.keys():
# print(key, len(dfdict[key]))
        df = pd.DataFrame(dfdict)
        return df
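# Minimal usage sketch for CompareSims (hypothetical directories, commented out
# to keep the module import-safe):
#   sim_a = Sim('/path/to/run_a', 'run_a')
#   sim_b = Sim('/path/to/run_b', 'run_b')
#   comp = CompareSims(sim_a, sim_b, steps=[499, 247, 163])
#   ratios = comp.comparisons   # stacked DataFrame of direct and interpolated MF ratios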
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
            Series([2, 3, 4]) - tdser
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 15:07:09 2016
@author: advena
"""
#import re
from datetime import datetime
#import numpy as np
import pandas as pd
import os
import sys
import shutil
from dateutil import parser
########################################################################
# User Inputs #
########################################################################
# Do not edit the PJM owner list (except to correct it)
pjm_owner_list=[202,205,209,212,215,222,225,320,345]
# Filter the data down to these owners.
# For PJM only, set compare_owner_list = pjm_owner_list
# For all untis, set compare_owner_list = []
compare_owner_list = pjm_owner_list
#src_dir = r'K:\AFC_MODEL_UPDATES\2016_S\IDC Models'
src_dir = r'K:\AFC_MODEL_UPDATES\2016_S\IDC Models'
# Working directory. If src_dir != tgt_dir, a copy of the original raw
# files is copied to tgt_dir to prevent corruption of the originals.
# The copy here will be modified; a new line will be added to the top
# to allow for csv parsing without error.
tgt_dir = r'C:\temp'
# The raw files to compare
raw_file1 = r'sum16idctr1p4_v32.RAW'
raw_file2 = r'sum16idctr1p6_v32.RAW'
# Maximum number of columns in any row, likely 28.
max_cols = 28 # set to zero to automatically determine.
# The regex pattern (before compiling) that identifies the header of a new section of the log file.
end_of_section_pattern="0 / "
########################################################################
# Function Definitions #
########################################################################
def max_col_cnt(filename):
'''
Finds row with mwx number of columns by counting commas
'''
max_commas = 0
lines = open(filename)
for line in lines:
cnt = line.count(',')
if cnt > max_commas:
max_commas = cnt
return max_commas + 1
def raw_to_df(src_dir, tgt_dir, filename, max_cols=28):
'''
src_dir: directory in which the raw files are located
tgt_dir: directory in which to copy the files
(to prevent corrupting originals)
filename: name of raw file (exluding path)
ins_hdr: True to add a generic header to the file (col1, col2, ...)
False if you already added a header to the file.
max_cols: The maximim number of columns in any row, likely 28.
'''
#create generic column headers
cols=["col"+str(i) for i in range(max_cols)]
#concatenate path and filename
src=os.path.join(src_dir,filename)
#copy both files to the target directory
if src_dir != tgt_dir and tgt_dir!=None and tgt_dir!='':
print(' copying raw file to working directory: ' + tgt_dir)
tgt=os.path.join(tgt_dir,filename)
shutil.copyfile(src, tgt)
else:
tgt=src
# return dataframe
    print(' reading raw file into dataframe: ' + tgt_dir)
    lst = pd.read_csv(open(tgt), names=cols, dtype=str)
    return pd.DataFrame(lst)
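# Minimal usage sketch (file names come from the user inputs above, shown as comments):
#   n_cols = max_col_cnt(os.path.join(src_dir, raw_file1))
#   df1 = raw_to_df(src_dir, tgt_dir, raw_file1, max_cols=n_cols)
#   df2 = raw_to_df(src_dir, tgt_dir, raw_file2, max_cols=n_cols)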
# Copyright 2020 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from absl import app
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
from stock_prediction_class import StockPrediction
from stock_prediction_numpy import StockData
from datetime import timedelta
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
def main(argv):
print(tf.version.VERSION)
inference_folder = os.path.join(os.getcwd(), RUN_FOLDER)
stock = StockPrediction(STOCK_TICKER, STOCK_START_DATE, STOCK_VALIDATION_DATE, inference_folder)
data = StockData(stock)
(x_train, y_train), (x_test, y_test), (training_data, test_data) = data.download_transform_to_numpy(TIME_STEPS, inference_folder)
min_max = data.get_min_max()
# load future data
print('Latest Stock Price')
latest_close_price = test_data.Close.iloc[-1]
latest_date = test_data[-1:]['Close'].idxmin()
print(latest_close_price)
print('Latest Date')
print(latest_date)
tomorrow_date = latest_date + timedelta(1)
# Specify the next 300 days
next_year = latest_date + timedelta(TIME_STEPS*100)
print('Future Date')
print(tomorrow_date)
print('Future Timespan Date')
print(next_year)
x_test, y_test, test_data = data.generate_future_data(TIME_STEPS, min_max, tomorrow_date, next_year, latest_close_price)
# load the weights from our best model
model = tf.keras.models.load_model(os.path.join(inference_folder, 'model_weights.h5'))
model.summary()
#print(x_test)
#print(test_data)
# display the content of the model
baseline_results = model.evaluate(x_test, y_test, verbose=2)
for name, value in zip(model.metrics_names, baseline_results):
print(name, ': ', value)
print()
# perform a prediction
test_predictions_baseline = model.predict(x_test)
test_predictions_baseline = min_max.inverse_transform(test_predictions_baseline)
    test_predictions_baseline = pd.DataFrame(test_predictions_baseline)
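    # min_max.inverse_transform maps the scaled network outputs back to price units;
    # illustrative numbers only: with a scaler fitted on closes spanning 100-200,
    # a prediction of 0.25 maps back to 100 + 0.25 * (200 - 100) = 125.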
from src.typeDefs.section_1_1.section_1_1_volt import ISection_1_1_volt
import datetime as dt
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
from src.utils.addMonths import addMonths
import pandas as pd
def fetchSection1_1_voltContext(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> ISection_1_1_volt:
monthDtObj = dt.datetime(startDt.year, startDt.month, 1)
month_name = dt.datetime.strftime(startDt, "%b %y")
mRepo = MetricsDataRepo(appDbConnStr)
# get high voltage violation data for 765 kV
voltData765 = mRepo.getDailyVoltDataByLevel(
765, "%Time >420 or 800", startDt, endDt)
# convert to dataframe
voltData765Df = pd.DataFrame(voltData765)
# take only required columns
voltData765Df = voltData765Df[["entity_name", "data_val"]]
# convert column to numeric
voltData765Df['data_val'] = pd.to_numeric(
voltData765Df['data_val'], errors='coerce')
# get mean for each substation
voltData765Df = voltData765Df.groupby("entity_name").mean()
voltData765Df.reset_index(inplace=True)
# check if there is violation more than 0.1
is765MoreThan10Perc = voltData765Df[voltData765Df["data_val"]
> 10].shape[0] > 0
msg765 = ""
if not is765MoreThan10Perc:
msgStations = voltData765Df[voltData765Df["data_val"]
> 0]["entity_name"].values
msgStations = [x.replace(' - 765KV', '').capitalize()
for x in msgStations]
msg765 = "All 765 kV nodes of WR were within the IEGC limit."
if len(msgStations) > 0:
msg765 = "All 765 kV nodes of WR were within the IEGC limit except few instances at {0}.".format(
','.join(msgStations))
else:
msgStations = voltData765Df[voltData765Df["data_val"]
> 10]["entity_name"].values
msgStations = [x.replace(' - 765KV', '').capitalize()
for x in msgStations]
highViolSubstation = voltData765Df.loc[voltData765Df['data_val'].idxmax(
)]
msg765 = "High Voltage (greater than 800 kV) at 765 kV substations were observed at {0}. Highest of {1}{2} of time voltage remained above 780 kV at {3} in the month of {4}.".format(
', '.join(msgStations), round(highViolSubstation["data_val"], 2), "%", highViolSubstation["entity_name"].replace(' - 765KV', '').capitalize(), month_name)
# get high voltage violation data for 400 kV
voltData400 = mRepo.getDailyVoltDataByLevel(
400, "%Time >420 or 800", startDt, endDt)
# convert to dataframe
    voltData400Df = pd.DataFrame(voltData400)
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import numpy as np
import plotly.graph_objs as go
import plotly.tools as tools
from dash.dependencies import Input, Output, State
from dateutil.parser import parse
import squarify
import math
from datetime import datetime
from bisect import bisect_left
import grasia_dash_components as gdc
####################################################
### DASH SETUP CODE ###
####################################################
# Setup Dash's default CSS stylesheet
#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# Setup the Dash application
#app = dash.Dash(__name__, external_stylesheets=external_stylesheets, static_folder='static')
app = dash.Dash(__name__, static_folder='static')
app.title = 'Bitcoin Booms and Busts'
server = app.server
####################################################
### DECENTRALIZED VIZ CODE ###
####################################################
tm_width = 100
tm_height = 100
tm_x = 0
tm_y = 0
# color pallette for the viz
cList = ['lightcyan', 'lightblue', 'deepskyblue', 'dodgerblue', 'steelblue',
'midnightblue']
table = [['Name', 'Estimated Balance'],
['Planktons', '0 to 1 Bitcoin'],
         ['Clownfishes', '1 to 10 Bitcoins'],
['Lionfishes', '10 to 100 Bitcoins'],
['Swordfishes', '100 to 1000 Bitcoins'],
['Sharks', '1000 to 10000 Bitcoins'],
['Whales', 'More than 10000 Bitcoins']]
df_table = pd.DataFrame(table)
df_table.columns = df_table.iloc[0]
df_table = df_table[1:]
df_val_per_month = pd.read_csv('change_bins_values_per_month.csv')
df_val_per_month.fillna(0, inplace=True)
df_val_per_month.loc[:,"_0_to_1":"More_10000"] = df_val_per_month.loc[:,"_0_to_1":"More_10000"].div(df_val_per_month.sum(axis=1), axis=0) * 100
df_val_per_month.columns = ["Month", "Planktons", "Clownfishes",
"Lionfishes", "Swordfishes", "Sharks", "Whales"]
df_val_per_month = df_val_per_month.sort_values(by='Month')
df_val_per_month.Month = pd.to_datetime(df_val_per_month.Month)
df_val_per_month = df_val_per_month.set_index(['Month'])
df_ct_per_month = pd.read_csv('change_count_bins_per_month.csv')
df_ct_per_month.fillna(0, inplace=True)
df_ct_per_month.loc[:,"_0_to_1":"More_10000"] = df_ct_per_month.loc[:,"_0_to_1":"More_10000"].div(df_ct_per_month.sum(axis=1), axis=0) * 100
df_ct_per_month.columns = ["Month", "Planktons", "Clownfishes",
"Lionfishes", "Swordfishes", "Sharks", "Whales"]
df_ct_per_month = df_ct_per_month.sort_values(by='Month')
df_ct_per_month.Month = pd.to_datetime(df_ct_per_month.Month)
df_ct_per_month = df_ct_per_month.set_index(['Month'])
epoch = datetime.utcfromtimestamp(0)
def unix_time_millis(dt):
return (dt - epoch).total_seconds()
def timestamp_millis(unix_ts):
ts = datetime.utcfromtimestamp(unix_ts).strftime('%Y-%m-%d')
    return pd.to_datetime(ts)
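# Round-trip example for the two helpers above (UTC; despite the 'millis' in the
# names, both work in whole seconds):
#   unix_time_millis(datetime(2019, 1, 1))  -> 1546300800.0 seconds since the epoch
#   timestamp_millis(1546300800)            -> Timestamp('2019-01-01')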
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from generate_paper_outputs import wave_column_headings
def plot_grouped_bar(backend="combined", output_dir="released_outputs/combined", measure="declined", breakdown="high_level_ethnicity"):
''' Plot a chart showing the percent of people of each ethnicity or imd band and by priority group
who have a decline recorded.
Note the necessary input csv is only available in combined data.
'''
if measure == "declined":
        df = pd.read_csv(f"released_outputs/{backend}/tables/waves_1_9_declined_{breakdown}.csv", index_col=0)
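    # Example invocation, mirroring the defaults in the signature above (kept as a comment):
    #   plot_grouped_bar(backend="combined", output_dir="released_outputs/combined",
    #                    measure="declined", breakdown="high_level_ethnicity")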
import os
import glob
import datetime
from collections import OrderedDict
import pandas as pd
import numpy as np
import pandas_market_calendars as mcal
import matplotlib.pyplot as plt
FILEPATH = '/home/nate/Dropbox/data/sp600/'
CONSTITUENT_FILEPATH = '/home/nate/Dropbox/data/barchart.com/'
WRDS_FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/tsv/'
def get_home_dir(repo_name='beat_market_analysis'):
cwd = os.getcwd()
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == repo_name]
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
def get_historical_constituents_wrds():
"""
gets historical constituents from WRDS file
"""
# TODO: get latest file
df = pd.read_csv(WRDS_FILEPATH + 'index_constituents_9-12-2018.txt', parse_dates=['from', 'thru'], infer_datetime_format=True, sep='\t')
if df['from'][0].tzinfo is None:
df['from'] = df['from'].dt.tz_localize('US/Eastern')
if df['thru'][0].tzinfo is None:
df['thru'] = df['thru'].dt.tz_localize('US/Eastern')
# only use s&p600 for now
sp600_df = df[df['conm'] == 'S&P Smallcap 600 Index']
# create dataframe with list of constituents for each day
start = sp600_df['from'].min()
# get todays date and reset hour, min, sec to 0s
end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0)
# replace NaT with tomorrow's date
# gives copy warning but can't get rid of it...
sp600_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True)
nyse = mcal.get_calendar('NYSE')
# gets all dates
# date_range = mcal.date_range(start=start, end=end)
# gets only dates valid for NYSE
date_range = nyse.valid_days(start_date=start.date(), end_date=end.date())
constituent_companies = OrderedDict()
constituent_tickers = OrderedDict()
lengths = []
# TODO: multiprocessing to speed up
for d in date_range:
# if date is within stock's from and thru, add to list
# stocks were removed on 'thru', so if it is the 'thru' date, then shouldn't be included
# but stocks were added on 'from' date, so include stocks on 'from' date
# use dataframe masking
date_string = d.strftime('%Y-%m-%d')
current_stocks = sp600_df[(sp600_df['from'] <= d) & (sp600_df['thru'] > d)]
current_companies = current_stocks['co_conm'] # company names
current_tickers = current_stocks['co_tic'] # company tickers
constituent_companies[date_string] = current_companies
constituent_tickers[date_string] = current_tickers
lengths.append(current_tickers.shape[0])
# look at number of constituents as a histogram; mostly 600 but a few above and below
# pd.value_counts(lengths)
# plt.hist(lengths)
# plt.show()
# TODO:
# need to check that no tickers are used for multiple companies
# get unique dates where changes were made
unique_dates = set(sp600_df['from'].unique()) | set(sp600_df['thru'].unique())
return constituent_companies, constituent_tickers, unique_dates
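# Minimal usage sketch (requires the WRDS constituent file referenced above; any
# NYSE trading day in the covered range works as a key):
#   companies, tickers, change_dates = get_historical_constituents_wrds()
#   tickers['2018-06-01']   # pd.Series of S&P 600 tickers on that trading day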
def get_latest_daily_date(source='barchart.com'):
# get latest date from daily scrapes
daily_files = glob.glob(FILEPATH + '{}/*.csv'.format(source))
if len(daily_files) == 0:
return None
daily_dates = [pd.to_datetime(f.split('/')[-1].split('_')[-1].split('.')[0]) for f in daily_files]
last_daily = max(daily_dates)
return last_daily
def get_latest_daily_date_constituents(index='QQQ'):
# get latest date from daily scrapes
daily_files = glob.glob(CONSTITUENT_FILEPATH + '{}/*.csv'.format(index))
if len(daily_files) == 0:
return None
daily_dates = [pd.to_datetime(f.split('/')[-1].split('_')[-1].split('.')[0]) for f in daily_files]
last_daily = max(daily_dates)
return last_daily
def get_latest_index_date(ticker='IJR'):
# get latest date from daily scrapes
extension = 'csv'
if ticker == 'SLY':
extension = 'xls'
daily_files = glob.glob(FILEPATH + 'index_funds/{}/*.{}'.format(ticker, extension))
if len(daily_files) == 0:
return None
daily_dates = [pd.to_datetime(f.split('/')[-1].split('_')[-1].split('.')[0]) for f in daily_files]
last_daily = max(daily_dates)
return last_daily
def load_sp600_files(date='latest', source='barchart.com'):
"""
loads data from files from investing.com
https://www.investing.com/indices/s-p-600-components
date should be a string, either 'latest' to use the latest available date, or
a specific date like YYYY-mm-dd
"""
# TODO: deal with 0 bytes files
folder = FILEPATH + '{}/'.format(source)
dfs = []
labels = ['price', 'performance', 'technical', 'fundamental']
if date == 'latest':
file_date = get_latest_daily_date(source=source).strftime('%Y-%m-%d')
if file_date is None:
print('no files to load!')
return None
else:
file_date = date
for l in labels:
filename = 'sp600_{}_{}.csv'.format(l, file_date)
print(filename)
if source == 'barchart.com':
# catch errors with 0 bytes filesize
if os.path.getsize(folder + filename) == 0:
print('filesize is 0 for', filename, 'returning None')
return None
dfs.append(pd.read_csv(folder + filename, skipfooter=1))
elif source == 'investing.com':
dfs.append(pd.read_csv(folder + filename))
# ensure the names and symbols are identical
eq01 = dfs[0][['Name', 'Symbol']].equals(dfs[1][['Name', 'Symbol']])
eq12 = dfs[1][['Name', 'Symbol']].equals(dfs[2][['Name', 'Symbol']])
eq23 = dfs[2][['Name', 'Symbol']].equals(dfs[3][['Name', 'Symbol']])
if eq01 and eq12 and eq23:
print('all names/symbols are equal')
else:
print('WARNING: some names/symbols not equal')
for d in dfs:
d.set_index('Symbol', inplace=True)
if source == 'barchart.com':
d = d[:-2] # the last row has some info about barchart.com
# remove 'Name' column from all but first df
for d in dfs[1:]:
d.drop('Name', axis=1, inplace=True)
if source == 'barchart.com':
if 'Last' in d.columns:
d.drop('Last', axis=1, inplace=True)
if source == 'investing.com':
# add prefixes so 'Daily' is different for performance and technical dfs
dfs[1].columns = ['perf ' + c for c in dfs[1].columns]
dfs[2].columns = ['tech ' + c for c in dfs[2].columns]
df = pd.concat(dfs, axis=1)
# 'Time' column seems to be just year/month
df.drop('Time', axis=1, inplace=True)
# convert k to 1000, M to 1e6, and B to 1.9
if source == 'barchart.com':
# just need to rename the column, the data is not $K, just $
df['Market Cap'] = df['Market Cap, $K']
df.drop('Market Cap, $K', axis=1, inplace=True)
elif source == 'investing.com':
for c in ['Vol.', 'Average Vol. (3m)', 'Market Cap', 'Revenue']:
df[c] = df[c].apply(lambda x: clean_abbreviations(x))
# clean up % columns
if source == 'barchart.com':
if 'Div Yield(a)' in df.columns:
cols = ['%Chg', 'Wtd Alpha', 'YTD %Chg', '1M %Chg', '3M %Chg', '52W %Chg', '20D Rel Str', '20D His Vol', 'Div Yield(a)']
elif 'Div Yield(ttm)' in df.columns:
cols = ['%Chg', 'Wtd Alpha', 'YTD %Chg', '1M %Chg', '3M %Chg', '52W %Chg', '20D Rel Str', '20D His Vol', 'Div Yield(ttm)']
else:
yield_col = [c for c in df.columns if 'Div Yield' in c]
cols = ['%Chg', 'Wtd Alpha', 'YTD %Chg', '1M %Chg', '3M %Chg', '52W %Chg', '20D Rel Str', '20D His Vol'] + yield_col
elif source == 'investing.com':
cols = ['Chg. %', 'perf Daily', 'perf 1 Week', 'perf 1 Month', 'perf YTD', 'perf 1 Year', 'perf 3 Years']
for c in cols:
df[c] = df[c].apply(lambda x: clean_pcts(x))
if source == 'investing.com':
# maps technical indicators to numbers for sorting
conversion_dict = {'Strong Buy': 2,
'Buy': 1,
'Neutral': 0,
'Sell': -1,
'Strong Sell': -2}
for k, v in conversion_dict.items():
for c in dfs[2].columns:
df.at[df[c] == k, c] = v
return df
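# Minimal usage sketch for load_sp600_files (arguments mirror the defaults above):
#   df = load_sp600_files(date='latest', source='barchart.com')
#   smallest = get_current_smallest_mkt_cap(df, n=20)   # helper defined below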
def clean_pcts(x):
"""
the 'Chg. %' column and others have entries like +1.24%
"""
# if not enough data, will be '-' with investing.com
if x == '-' or pd.isnull(x):
return np.nan
elif x == 'unch':
return float(0)
elif type(x) == float:
return x
new_x = x.replace('+', '')
new_x = new_x.replace('%', '')
new_x = float(new_x) / 100
return new_x
def clean_abbreviations(x):
"""
replaces K with 000, M with 000000, B with 000000000
"""
# a few entries in Revenue were nan
if pd.isnull(x):
return np.nan
elif 'K' in x:
return int(float(x[:-1]) * 1e3)
elif 'M' in x:
return int(float(x[:-1]) * 1e6)
elif 'B' in x:
return int(float(x[:-1]) * 1e9)
else:
return int(x)
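# Expected behaviour of the two cleaners above (illustrative values):
#   clean_pcts('+1.24%')        -> 0.0124
#   clean_pcts('unch')          -> 0.0
#   clean_abbreviations('3.2M') -> 3200000
#   clean_abbreviations('1.5B') -> 1500000000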
def get_current_smallest_mkt_cap(df, n=20):
"""
using df from investing.com and the load_sp600_files function,
gets the n number of smallest market-cap stocks
should use barchart.com or wrds as source of constituents
"""
sorted_df = df.sort_values(by='Market Cap')
return sorted_df.iloc[:n].index
def load_ijr_holdings():
latest_date = get_latest_index_date(ticker='IJR')
if latest_date is None:
print('no files')
return
filename = FILEPATH + 'index_funds/IJR/IJR_holdings_' + latest_date.strftime('%Y-%m-%d') + '.csv'
df = pd.read_csv(filename, skiprows=10)
df = df[df['Asset Class'] == 'Equity']
for c in ['Shares', 'Market Value', 'Notional Value']:
df[c] = df[c].apply(lambda x: x.replace(',', '')).astype(float)
df['Price'] = df['Price'].astype(float)
df.set_index('Ticker', inplace=True)
return df
def load_sly_holdings():
latest_date = get_latest_index_date(ticker='SLY')
if latest_date is None:
print('no files')
return
filename = FILEPATH + 'index_funds/SLY/SLY_holdings_' + latest_date.strftime('%Y-%m-%d') + '.xls'
    df = pd.read_excel(filename, skiprows=3, skipfooter=11)
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
url_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
url_deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
url_recovered = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
confirmed = pd.read_csv(url_confirmed)
deaths = pd.read_csv(url_deaths)
recovered = pd.read_csv(url_recovered)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler
def concat_df(train_data, test_data):
return pd.concat([train_data, test_data], sort=True).reset_index(drop=True)
df_train = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/challenge/feature-engineering/titanic_train.csv')
from functools import partial
from tqdm import tqdm
import multiprocessing as mp
import pandas as pd
import geopandas as gpd
import numpy as np
idx = pd.IndexSlice
def cartesian(s1, s2):
"""Cartesian product of two pd.Series"""
return pd.DataFrame(np.outer(s1, s2), index=s1.index, columns=s2.index)
def reverse(dictionary):
"""reverses a keys and values of a dictionary"""
return {v: k for k, v in dictionary.items()}
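# Illustrative behaviour of the two helpers above:
#   cartesian(pd.Series([1, 2], index=['a', 'b']), pd.Series([10, 20], index=['x', 'y']))
#       -> 2x2 DataFrame with rows a/b, columns x/y and values [[10, 20], [20, 40]]
#   reverse({'DE': 'Germany'}) -> {'Germany': 'DE'}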
# translations for Eurostat
eurostat_country_to_alpha2 = {
"EU28": "EU",
"EA19": "EA",
"Belgium": "BE",
"Bulgaria": "BG",
"Czech Republic": "CZ",
"Denmark": "DK",
"Germany": "DE",
"Estonia": "EE",
"Ireland": "IE",
"Greece": "GR",
"Spain": "ES",
"France": "FR",
"Croatia": "HR",
"Italy": "IT",
"Cyprus": "CY",
"Latvia": "LV",
"Lithuania": "LT",
"Luxembourg": "LU",
"Hungary": "HU",
"Malta": "MA",
"Netherlands": "NL",
"Austria": "AT",
"Poland": "PL",
"Portugal": "PT",
"Romania": "RO",
"Slovenia": "SI",
"Slovakia": "SK",
"Finland": "FI",
"Sweden": "SE",
"United Kingdom": "GB",
"Iceland": "IS",
"Norway": "NO",
"Montenegro": "ME",
"FYR of Macedonia": "MK",
"Albania": "AL",
"Serbia": "RS",
"Turkey": "TU",
"Bosnia and Herzegovina": "BA",
"Kosovo\n(UNSCR 1244/99)": "KO", # 2017 version
# 2016 version
"Kosovo\n(under United Nations Security Council Resolution 1244/99)": "KO",
"Moldova": "MO",
"Ukraine": "UK",
"Switzerland": "CH",
}
non_EU = ["NO", "CH", "ME", "MK", "RS", "BA", "AL"]
idees_rename = {"GR": "EL", "GB": "UK"}
eu28 = [
"FR",
"DE",
"GB",
"IT",
"ES",
"PL",
"SE",
"NL",
"BE",
"FI",
"CZ",
"DK",
"PT",
"RO",
"AT",
"BG",
"EE",
"GR",
"LV",
"HU",
"IE",
"SK",
"LT",
"HR",
"LU",
"SI",
] + ["CY", "MT"]
eu28_eea = eu28.copy()
eu28_eea.remove("GB")
eu28_eea.append("UK")
to_ipcc = {
"electricity": "1.A.1.a - Public Electricity and Heat Production",
"residential non-elec": "1.A.4.b - Residential",
"services non-elec": "1.A.4.a - Commercial/Institutional",
"rail non-elec": "1.A.3.c - Railways",
"road non-elec": "1.A.3.b - Road Transportation",
"domestic navigation": "1.A.3.d - Domestic Navigation",
"international navigation": "1.D.1.b - International Navigation",
"domestic aviation": "1.A.3.a - Domestic Aviation",
"international aviation": "1.D.1.a - International Aviation",
"total energy": "1 - Energy",
"industrial processes": "2 - Industrial Processes and Product Use",
"agriculture": "3 - Agriculture",
"agriculture, forestry and fishing": '1.A.4.c - Agriculture/Forestry/Fishing',
"LULUCF": "4 - Land Use, Land-Use Change and Forestry",
"waste management": "5 - Waste management",
"other": "6 - Other Sector",
"indirect": "ind_CO2 - Indirect CO2",
"total wL": "Total (with LULUCF)",
"total woL": "Total (without LULUCF)",
}
def build_eurostat(countries, year):
"""Return multi-index for all countries' energy data in TWh/a."""
report_year = snakemake.config["energy"]["eurostat_report_year"]
filenames = {
2016: f"/{year}-Energy-Balances-June2016edition.xlsx",
2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx"
}
dfs = pd.read_excel(
snakemake.input.eurostat + filenames[report_year],
sheet_name=None,
skiprows=1,
index_col=list(range(4)),
)
# sorted_index necessary for slicing
lookup = eurostat_country_to_alpha2
labelled_dfs = {lookup[df.columns[0]]: df
for df in dfs.values()
if lookup[df.columns[0]] in countries}
df = pd.concat(labelled_dfs, sort=True).sort_index()
# drop non-numeric and country columns
non_numeric_cols = df.columns[df.dtypes != float]
country_cols = df.columns.intersection(lookup.keys())
to_drop = non_numeric_cols.union(country_cols)
df.drop(to_drop, axis=1, inplace=True)
# convert ktoe/a to TWh/a
df *= 11.63 / 1e3
return df
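# Unit-conversion note for the factor used above: 1 ktoe = 11.63 GWh, so multiplying
# by 11.63 / 1e3 turns ktoe/a into TWh/a; e.g. 500 ktoe/a -> 500 * 11.63 / 1e3 ≈ 5.8 TWh/a.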
def build_swiss(year):
"""Return a pd.Series of Swiss energy data in TWh/a"""
fn = snakemake.input.swiss
df = pd.read_csv(fn, index_col=[0,1]).loc["CH", str(year)]
# convert PJ/a to TWh/a
df /= 3.6
return df
def idees_per_country(ct, year):
base_dir = snakemake.input.idees
ct_totals = {}
ct_idees = idees_rename.get(ct, ct)
fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx"
fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx"
fn_transport = f"{base_dir}/JRC-IDEES-2015_Transport_{ct_idees}.xlsx"
# residential
df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year]
ct_totals["total residential space"] = df["Space heating"]
rows = ["Advanced electric heating", "Conventional electric heating"]
ct_totals["electricity residential space"] = df[rows].sum()
ct_totals["total residential water"] = df.at["Water heating"]
assert df.index[23] == "Electricity"
ct_totals["electricity residential water"] = df[23]
ct_totals["total residential cooking"] = df["Cooking"]
assert df.index[30] == "Electricity"
ct_totals["electricity residential cooking"] = df[30]
df = pd.read_excel(fn_residential, "RES_summary", index_col=0)[year]
row = "Energy consumption by fuel - Eurostat structure (ktoe)"
ct_totals["total residential"] = df[row]
assert df.index[47] == "Electricity"
ct_totals["electricity residential"] = df[47]
assert df.index[46] == "Derived heat"
ct_totals["derived heat residential"] = df[46]
assert df.index[50] == 'Thermal uses'
ct_totals["thermal uses residential"] = df[50]
# services
df = pd.read_excel(fn_tertiary, "SER_hh_fec", index_col=0)[year]
ct_totals["total services space"] = df["Space heating"]
rows = ["Advanced electric heating", "Conventional electric heating"]
ct_totals["electricity services space"] = df[rows].sum()
ct_totals["total services water"] = df["Hot water"]
assert df.index[24] == "Electricity"
ct_totals["electricity services water"] = df[24]
ct_totals["total services cooking"] = df["Catering"]
assert df.index[31] == "Electricity"
ct_totals["electricity services cooking"] = df[31]
df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0)[year]
row = "Energy consumption by fuel - Eurostat structure (ktoe)"
ct_totals["total services"] = df[row]
assert df.index[50] == "Electricity"
ct_totals["electricity services"] = df[50]
assert df.index[49] == "Derived heat"
ct_totals["derived heat services"] = df[49]
assert df.index[53] == 'Thermal uses'
ct_totals["thermal uses services"] = df[53]
# agriculture, forestry and fishing
start = "Detailed split of energy consumption (ktoe)"
end = "Market shares of energy uses (%)"
df = pd.read_excel(fn_tertiary, "AGR_fec", index_col=0).loc[start:end, year]
rows = [
"Lighting",
"Ventilation",
"Specific electricity uses",
"Pumping devices (electric)"
]
ct_totals["total agriculture electricity"] = df[rows].sum()
rows = ["Specific heat uses", "Low enthalpy heat"]
ct_totals["total agriculture heat"] = df[rows].sum()
rows = [
"Motor drives",
"Farming machine drives (diesel oil incl. biofuels)",
"Pumping devices (diesel oil incl. biofuels)",
]
ct_totals["total agriculture machinery"] = df[rows].sum()
row = "Agriculture, forestry and fishing"
ct_totals["total agriculture"] = df[row]
# transport
    df = pd.read_excel(fn_transport, "TrRoad_ene", index_col=0)
import pandas as pd
import numpy as np
from pandas.tseries.holiday import USFederalHolidayCalendar
import seaborn as sns
import matplotlib.pyplot as plt
import glob
import sweetviz as sv
from scipy import stats
import sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
combined_data_path = "./data/output/2018data.csv"
# Data from https://gbfs.capitalbikeshare.com/gbfs/en/station_information.json
station_info_path = './data/station_information.json'
def prepare_data():
# Get all months data and combine them
data_files = glob.glob("./data/source_data/2018/*.csv")
dfs = list()
for filename in data_files:
df = pd.read_csv(filename)
dfs.append(df)
df = pd.concat(dfs, axis=0, ignore_index=True)
return df
def save_combined_data(df):
df.to_csv(combined_data_path)
def get_combined_data():
return pd.read_csv(combined_data_path)
def get_US_holidays(start_date, to_date):
return USFederalHolidayCalendar().holidays(start=start_date, end=to_date)
def extract_date_features(df, holidays):
df['hour'] = df['date'].dt.hour
df['day'] = df['date'].dt.day
df['month'] = df['date'].dt.month
df['day_of_week'] = df['date'].dt.dayofweek + 1
df['weekend'] = (df['day_of_week'] >= 5).astype(int)
df['season'] = df['date'].dt.month%12 // 3 + 1
df['holiday'] = df['date'].dt.date.astype('datetime64').isin(holidays).astype(int)
df['workingday'] = (~(df['weekend'].astype(bool) | df['holiday'].astype(bool))).astype(int)
return df
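# Illustrative output of extract_date_features for a single timestamp, assuming
# df['date'] holds pandas Timestamps (e.g. 2018-07-04 17:00, a US federal holiday):
#   hour=17, day=4, month=7, day_of_week=3 (Wednesday), weekend=0,
#   season=3, holiday=1, workingday=0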
def get_station_data(df):
    stations_info = pd.read_json(station_info_path)
"""
Multi criteria decision analysis
"""
from __future__ import division
from __future__ import print_function
import json
import os
import pandas as pd
import numpy as np
import cea.config
import cea.inputlocator
from cea.optimization.lca_calculations import lca_calculations
from cea.analysis.multicriteria.optimization_post_processing.electricity_imports_exports_script import electricity_import_and_exports
from cea.technologies.solar.photovoltaic import calc_Cinv_pv
from cea.optimization.constants import PUMP_ETA
from cea.constants import DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3
from cea.optimization.constants import SIZING_MARGIN
from cea.analysis.multicriteria.optimization_post_processing.individual_configuration import calc_opex_PV
from cea.technologies.chiller_vapor_compression import calc_Cinv_VCC
from cea.technologies.chiller_absorption import calc_Cinv
from cea.technologies.cooling_tower import calc_Cinv_CT
import cea.optimization.distribution.network_opt_main as network_opt
from cea.analysis.multicriteria.optimization_post_processing.locating_individuals_in_generation_script import locating_individuals_in_generation_script
from cea.technologies.heat_exchangers import calc_Cinv_HEX
from math import ceil, log
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def multi_criteria_main(locator, config):
# local variables
generation = config.multi_criteria.generations
category = "optimization-detailed"
if not os.path.exists(locator.get_address_of_individuals_of_a_generation(generation)):
data_address = locating_individuals_in_generation_script(generation, locator)
else:
data_address = pd.read_csv(locator.get_address_of_individuals_of_a_generation(generation))
# initialize class
data_generation = preprocessing_generations_data(locator, generation)
objectives = data_generation['final_generation']['population']
individual_list = objectives.axes[0].values
data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual_list[0], generation, data_address, config)
column_names = data_processed.columns.values
compiled_data = pd.DataFrame(np.zeros([len(individual_list), len(column_names)]), columns=column_names)
for i, individual in enumerate(individual_list):
data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual, generation, data_address, config)
for name in column_names:
compiled_data.loc[i][name] = data_processed[name][0]
compiled_data = compiled_data.assign(individual=individual_list)
normalized_TAC = (compiled_data['TAC_Mio'] - min(compiled_data['TAC_Mio'])) / (
max(compiled_data['TAC_Mio']) - min(compiled_data['TAC_Mio']))
normalized_emissions = (compiled_data['total_emissions_kiloton'] - min(compiled_data['total_emissions_kiloton'])) / (
max(compiled_data['total_emissions_kiloton']) - min(compiled_data['total_emissions_kiloton']))
normalized_prim = (compiled_data['total_prim_energy_TJ'] - min(compiled_data['total_prim_energy_TJ'])) / (
max(compiled_data['total_prim_energy_TJ']) - min(compiled_data['total_prim_energy_TJ']))
normalized_Capex_total = (compiled_data['Capex_total_Mio'] - min(compiled_data['Capex_total_Mio'])) / (
max(compiled_data['Capex_total_Mio']) - min(compiled_data['Capex_total_Mio']))
normalized_Opex = (compiled_data['Opex_total_Mio'] - min(compiled_data['Opex_total_Mio'])) / (
max(compiled_data['Opex_total_Mio']) - min(compiled_data['Opex_total_Mio']))
normalized_renewable_share = (compiled_data['renewable_share_electricity'] - min(compiled_data['renewable_share_electricity'])) / (
max(compiled_data['renewable_share_electricity']) - min(compiled_data['renewable_share_electricity']))
compiled_data = compiled_data.assign(normalized_TAC=normalized_TAC)
compiled_data = compiled_data.assign(normalized_emissions=normalized_emissions)
compiled_data = compiled_data.assign(normalized_prim=normalized_prim)
compiled_data = compiled_data.assign(normalized_Capex_total=normalized_Capex_total)
compiled_data = compiled_data.assign(normalized_Opex=normalized_Opex)
compiled_data = compiled_data.assign(normalized_renewable_share=normalized_renewable_share)
compiled_data['TAC_rank'] = compiled_data['normalized_TAC'].rank(ascending=True)
compiled_data['emissions_rank'] = compiled_data['normalized_emissions'].rank(ascending=True)
compiled_data['prim_rank'] = compiled_data['normalized_prim'].rank(ascending=True)
# user defined mcda
compiled_data['user_MCDA'] = compiled_data['normalized_Capex_total'] * config.multi_criteria.capextotal * config.multi_criteria.economicsustainability + \
compiled_data['normalized_Opex'] * config.multi_criteria.opex * config.multi_criteria.economicsustainability + \
compiled_data['normalized_TAC'] * config.multi_criteria.annualizedcosts * config.multi_criteria.economicsustainability + \
compiled_data['normalized_emissions'] *config.multi_criteria.emissions * config.multi_criteria.environmentalsustainability + \
compiled_data['normalized_prim'] *config.multi_criteria.primaryenergy * config.multi_criteria.environmentalsustainability + \
compiled_data['normalized_renewable_share'] * config.multi_criteria.renewableshare * config.multi_criteria.socialsustainability
compiled_data['user_MCDA_rank'] = compiled_data['user_MCDA'].rank(ascending=True)
compiled_data.to_csv(locator.get_multi_criteria_analysis(generation))
return compiled_data
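# The normalisation used above is a plain min-max rescaling; as a worked example,
# TAC values of [2.0, 3.0, 6.0] Mio map to (x - 2.0) / (6.0 - 2.0) = [0.0, 0.25, 1.0],
# so every criterion contributes to 'user_MCDA' on a comparable 0-1 scale before the
# user-defined weights are applied.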
def preprocessing_generations_data(locator, generations):
data_processed = []
with open(locator.get_optimization_checkpoint(generations), "rb") as fp:
data = json.load(fp)
# get lists of data for performance values of the population
costs_Mio = [round(objectives[0] / 1000000, 2) for objectives in
data['population_fitness']] # convert to millions
emissions_kiloton = [round(objectives[1] / 1000000, 2) for objectives in
data['population_fitness']] # convert to tons x 10^3 (kiloton)
prim_energy_TJ = [round(objectives[2] / 1000000, 2) for objectives in
data['population_fitness']] # convert to gigajoules x 10^3 (Terajoules)
individual_names = ['ind' + str(i) for i in range(len(costs_Mio))]
df_population = pd.DataFrame({'Name': individual_names, 'costs_Mio': costs_Mio,
'emissions_kiloton': emissions_kiloton, 'prim_energy_TJ': prim_energy_TJ
}).set_index("Name")
individual_barcode = [[str(ind) if type(ind) == float else str(ind) for ind in
individual] for individual in data['population']]
def_individual_barcode = pd.DataFrame({'Name': individual_names,
'individual_barcode': individual_barcode}).set_index("Name")
# get lists of data for performance values of the population (hall_of_fame
costs_Mio_HOF = [round(objectives[0] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to millions
emissions_kiloton_HOF = [round(objectives[1] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to tons x 10^3
prim_energy_TJ_HOF = [round(objectives[2] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to gigajoules x 10^3
individual_names_HOF = ['ind' + str(i) for i in range(len(costs_Mio_HOF))]
df_halloffame = pd.DataFrame({'Name': individual_names_HOF, 'costs_Mio': costs_Mio_HOF,
'emissions_kiloton': emissions_kiloton_HOF,
'prim_energy_TJ': prim_energy_TJ_HOF}).set_index("Name")
# get dataframe with capacity installed per individual
for i, individual in enumerate(individual_names):
dict_capacities = data['capacities'][i]
dict_network = data['disconnected_capacities'][i]["network"]
list_dict_disc_capacities = data['disconnected_capacities'][i]["disconnected_capacity"]
for building, dict_disconnected in enumerate(list_dict_disc_capacities):
if building == 0:
df_disc_capacities = pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']])
else:
df_disc_capacities = df_disc_capacities.append(
pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']]))
df_disc_capacities = df_disc_capacities.set_index('building_name')
dict_disc_capacities = df_disc_capacities.sum(axis=0).to_dict() # series with sum of capacities
if i == 0:
df_disc_capacities_final = pd.DataFrame(dict_disc_capacities, index=[individual])
df_capacities = pd.DataFrame(dict_capacities, index=[individual])
            df_network = pd.DataFrame({"network": dict_network}, index=[individual])
"""
Evaluate prediction model for USDCAD spot rate (moving up or down or flat)
"""
__version__ = '0.2'
__author__ = '<NAME>'
import pandas as pd # Version 0.22.0
import numpy as np # Version 1.14.0
from SoftmaxNN import SoftmaxNN # Version 0.2
from utils import normalize_inputs, load_obj # Version 0.1
# Read Inputs ----------------------------------------------------------------------------------------------------------
input_root = 'Inputs/SoftmaxNN/'
output_root = 'Outputs/SoftmaxNN/'
eval_data = pd.read_csv(input_root+'OoS FX Data.csv', index_col=0)
import requests
from model.parsers import model as m
import pandas as pd
import datetime
dataset = m.initialize()
unique_dates = list()
raw_data = requests.get('https://api.covid19india.org/states_daily.json')
raw_json = raw_data.json()
for item in raw_json['states_daily']:
if item['date'] not in unique_dates:
unique_dates.append(item['date'])
for date in unique_dates:
for item in raw_json['states_daily']:
if date == item['date']:
for state in dataset:
if date not in state:
state[date] = dict()
state[date][item['status']] = item[state['code']]
def date_validate(date_text):
try:
datetime.datetime.strptime(date_text, '%d-%b-%y')
except ValueError:
print("Incorrect date format, should be dd-Mmm-yy")
return 0
def state_code_validate(state_code):
unique_states = list()
for item in dataset:
unique_states.append(item['code'])
if state_code in unique_states:
return 1
else:
print('Please enter a valid state code')
return 0
def needs_patch(date_to_fetch, state_code):
if (date_to_fetch == '26-Mar-20' and state_code == 'ap') or (date_to_fetch == '16-Mar-20' and state_code == 'mp'):
return True
return False
def apply_patch(date_to_fetch, state_code):
if date_to_fetch == '26-Mar-20' and state_code == 'ap':
return {'Confirmed': '1', 'Recovered': '0', 'Deceased': '0'}
if date_to_fetch == '16-Mar-20' and state_code == 'mp':
return {'Confirmed': '0', 'Recovered': '0', 'Deceased': '0'}
def fetch_by_date_and_code(date_to_fetch, state_code):
    if needs_patch(date_to_fetch, state_code):
        return apply_patch(date_to_fetch, state_code)
if date_to_fetch in unique_dates:
for state in dataset:
if state['code'] == state_code:
if date_to_fetch in state:
return state[date_to_fetch]
    else:
print('date does not exist')
def cumulative_datewise_data(date_to_fetch, state_code):
should_stop = False
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
print(unique_date, fetch_by_date_and_code(unique_date, state_code))
if should_stop:
break
def cumulative_data(date_to_fetch, state_code):
should_stop = False
cumulative_dict = dict()
if date_to_fetch in unique_dates:
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
returned_dict = fetch_by_date_and_code(unique_date, state_code)
for key in returned_dict:
if key in cumulative_dict:
cumulative_dict[key] += int(returned_dict[key])
else:
cumulative_dict[key] = int(returned_dict[key])
if should_stop:
break
return cumulative_dict
else:
return 0
def cumulative_series_datewise_data(date_to_fetch, state_code):
should_stop = False
cumulative_series_datewise_dict = dict()
if date_to_fetch in unique_dates:
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
cumulative_series_datewise_dict[unique_date] = cumulative_data(unique_date, state_code)
if should_stop:
break
return cumulative_series_datewise_dict
else:
print('date does not exist')
def cumulative_last_3_days(state_code, should_print = False):
resultset = dict()
for unique_date in unique_dates[-3:]:
resultset[unique_date] = cumulative_data(unique_date, state_code)
if should_print:
print(unique_date, cumulative_data(unique_date, state_code))
return resultset
def cumulative_last_3_days_all_states(choice):
resultset = dict()
for state in dataset:
resultset[state['name']] = cumulative_last_3_days(state['code'], False)
return resultset
def total_count(state_code):
cumulative_dict = dict()
for unique_date in unique_dates:
returned_dict = fetch_by_date_and_code(unique_date, state_code)
for key in returned_dict:
if key in cumulative_dict:
cumulative_dict[key] += int(returned_dict[key])
else:
cumulative_dict[key] = int(returned_dict[key])
return cumulative_dict
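# Illustrative calls for the helpers above (state codes follow api.covid19india.org,
# e.g. 'dl' for Delhi, 'mh' for Maharashtra; dates use the dd-Mmm-yy format):
#   total_count('dl')                      # {'Confirmed': ..., 'Recovered': ..., 'Deceased': ...}
#   cumulative_data('15-Apr-20', 'mh')     # totals up to and including that date
#   cumulative_last_3_days('dl', should_print=True)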
def make_data_frame():
unique_states = list()
confirmed_list = list()
recovery_list = list()
deceased_list = list()
for state in dataset[:-1]:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset[:-1]:
status = total_count(state['code'])
confirmed_list.append(status['Confirmed'])
recovery_list.append(status['Recovered'])
deceased_list.append(status['Deceased'])
data = {'STATE/UT':unique_states, 'Confirmed':confirmed_list, 'Recovered':recovery_list, 'Deceased':deceased_list}
df = pd.DataFrame(data, columns = ['STATE/UT', 'Confirmed', 'Recovered', 'Deceased'])
return df
def cumulative_last_3_days_confirmed_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Confirmed'])
    data = {'STATE/UT': unique_states,
            unique_dates[0]: dates[unique_dates[0]],
            unique_dates[1]: dates[unique_dates[1]],
            unique_dates[2]: dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def cumulative_last_3_days_recovered_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Recovered'])
    data = {'STATE/UT': unique_states,
            unique_dates[0]: dates[unique_dates[0]],
            unique_dates[1]: dates[unique_dates[1]],
            unique_dates[2]: dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def cumulative_last_3_days_deceased_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Deceased'])
    data = {'STATE/UT': unique_states,
            unique_dates[0]: dates[unique_dates[0]],
            unique_dates[1]: dates[unique_dates[1]],
            unique_dates[2]: dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def all_data_confirmed():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Confirmed'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def all_data_recovered():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Recovered'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def all_data_deceased():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Deceased'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def cumulative_all_data_confirmed():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
cumulative_dict = cumulative_data(date, state['code'])
dates[date].append(cumulative_dict['Confirmed'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
    df = pd.DataFrame(data)
    return df
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in SDC
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
raise TypingError('{} The index must be an Integer, Slice or List of Integer or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
raise TypingError('{} The index must be a Integer. Given: {}'.format(_func_name, idx))
if accessor == 'loc':
# Note: Loc return Series
# Note: Index 0 in slice not supported
# Note: Loc slice and callable with String not implement
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_loc_slice_noidx_impl
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_loc_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._series._data[mask], index[mask], self._series._name)
return hpat_pandas_series_loc_impl
raise TypingError('{} The index must be an Number, Slice, String, List, Array or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'at':
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_at_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return self._series._data[mask]
return hpat_pandas_series_at_impl
raise TypingError('{} The index must be a Number or String. Given: {}'.format(_func_name, idx))
raise TypingError('{} Unknown accessor. Only "loc", "iloc", "at", "iat" are supported.\
Given: {}'.format(_func_name, accessor))
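# --- Illustrative usage sketch (not part of the SDC sources) ---
# Shows how the accessor getitem overload above is reached from user code compiled
# with Numba, assuming SDC is installed and imported so the overload is registered.
# The helper below is a name chosen here for illustration only and is never called.
def _example_series_iloc_sketch():
    @numba.njit
    def second_element(s):
        return s.iloc[1]

    return second_element(pandas.Series([5.0, 4.0, 3.0, 2.0, 1.0]))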
@sdc_overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.get
Limitations
-----------
Supported ``key`` can be one of the following:
- Integer scalar, e.g. :obj:`series[0]`
- A slice, e.g. :obj:`series[2:5]`
- Another series
Examples
--------
.. literalinclude:: ../../../examples/series_getitem.py
:language: python
:lines: 27-
:caption: Getting Pandas Series elements
:name: ex_series_getitem
.. command-output:: python ./series_getitem.py
:cwd: ../../../examples
.. todo:: Fix SDC behavior and add the expected output of the > python ./series_getitem.py to the docstring
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
return None
# Note: Getitem return Series
index_is_none = isinstance(self.index, numba.types.misc.NoneType)
index_is_none_or_numeric = index_is_none or (self.index and isinstance(self.index.dtype, types.Number))
index_is_string = not index_is_none and isinstance(self.index.dtype, (types.UnicodeType, types.StringLiteral))
if (
isinstance(idx, types.Number) and index_is_none_or_numeric or
(isinstance(idx, (types.UnicodeType, types.StringLiteral)) and index_is_string)
):
def hpat_pandas_series_getitem_index_impl(self, idx):
index = self.index
mask = numpy.empty(len(self._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._data[mask], index[mask], self._name)
return hpat_pandas_series_getitem_index_impl
if (isinstance(idx, types.Integer) and index_is_string):
def hpat_pandas_series_idx_impl(self, idx):
return self._data[idx]
return hpat_pandas_series_idx_impl
if isinstance(idx, types.SliceType):
# Return slice for str values not implement
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_slice_impl
if (
isinstance(idx, (types.List, types.Array)) and
isinstance(idx.dtype, (types.Boolean, bool))
):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (index_is_none and isinstance(idx, SeriesType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
index = numpy.arange(len(self._data))
if (index != idx.index).sum() == 0:
return pandas.Series(self._data[idx._data], index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
res = numpy.copy(self._data[:len(idx._data)])
index = numpy.arange(len(self._data))
for i in numba.prange(len(res)):
for j in numba.prange(len(index)):
if j == idx._data[i]:
res[i] = self._data[j]
return pandas.Series(res, index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (isinstance(idx, SeriesType) and not isinstance(self.index, types.NoneType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
# Series with str index not implement
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
if (self._index != idx._index).sum() == 0:
return pandas.Series(self._data[idx._data], self._index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_series_impl
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
index = self.index
data = self._data
size = len(index)
data_res = []
index_res = []
for value in idx._data:
mask = numpy.zeros(shape=size, dtype=numpy.bool_)
for i in numba.prange(size):
mask[i] = index[i] == value
data_res.extend(data[mask])
index_res.extend(index[mask])
return pandas.Series(data=data_res, index=index_res, name=self._name)
return hpat_pandas_series_getitem_idx_series_impl
raise TypingError('{} The index must be an Number, Slice, String, Boolean Array or a Series.\
Given: {}'.format(_func_name, idx))
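# --- Illustrative usage sketch (not part of the SDC sources) ---
# The getitem overload above dispatches on the index type; a boolean mask selects a
# sub-Series much like plain pandas. Assumes SDC is imported; the helper name is
# chosen here for illustration only and is never called.
def _example_series_getitem_mask_sketch():
    @numba.njit
    def positives(s):
        return s[s.values > 0.0]

    return positives(pandas.Series([-1.0, 2.0, -3.0, 4.0]))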
@sdc_overload(operator.setitem)
def hpat_pandas_series_setitem(self, idx, value):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.__setitem__
Examples
--------
.. literalinclude:: ../../../examples/series_setitem_int.py
:language: python
:lines: 27-
:caption: Setting Pandas Series elements
:name: ex_series_setitem
.. code-block:: console
> python ./series_setitem_int.py
0 0
1 4
2 3
3 2
4 1
dtype: int64
> python ./series_setitem_slice.py
0 5
1 4
2 0
3 0
4 0
dtype: int64
> python ./series_setitem_series.py
0 5
1 0
2 3
3 0
4 1
dtype: int64
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.set` implementation
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_setitem*
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
value: :object
input value
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
ty_checker = TypeChecker('Operator setitem.')
ty_checker.check(self, SeriesType)
if not (isinstance(idx, (types.Integer, types.SliceType, SeriesType))):
ty_checker.raise_exc(idx, 'int, Slice, Series', 'idx')
if not((isinstance(value, SeriesType) and isinstance(value.dtype, self.dtype)) or \
isinstance(value, type(self.dtype))):
ty_checker.raise_exc(value, self.dtype, 'value')
if isinstance(idx, types.Integer) or isinstance(idx, types.SliceType):
def hpat_pandas_series_setitem_idx_integer_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_value
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_slice
"""
self._data[idx] = value
return self
return hpat_pandas_series_setitem_idx_integer_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_setitem_idx_series_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_series
"""
super_index = idx._data
self._data[super_index] = value
return self
return hpat_pandas_series_setitem_idx_series_impl
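# --- Illustrative usage sketch (not part of the SDC sources) ---
# The setitem overload above mutates the underlying data array in place for integer,
# slice and Series indexers. Assumes SDC is imported; the helper is illustrative only.
def _example_series_setitem_sketch():
    @numba.njit
    def zero_first(s):
        s[0] = 0.0
        return s

    return zero_first(pandas.Series([5.0, 4.0, 3.0]))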
@sdc_overload_attribute(SeriesType, 'iloc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series method :meth:`pandas.Series.iloc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iloc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iloc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iloc')
return hpat_pandas_series_iloc_impl
@sdc_overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_loc(self):
"""
Pandas Series method :meth:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_loc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_loc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'loc')
return hpat_pandas_series_loc_impl
@sdc_overload_attribute(SeriesType, 'iat')
def hpat_pandas_series_iat(self):
"""
Pandas Series method :meth:`pandas.Series.iat` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iat*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iat().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iat_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iat')
return hpat_pandas_series_iat_impl
@sdc_overload_attribute(SeriesType, 'at')
def hpat_pandas_series_at(self):
"""
Pandas Series method :meth:`pandas.Series.at` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_at*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute at().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_at_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'at')
return hpat_pandas_series_at_impl
@sdc_overload_method(SeriesType, 'nsmallest')
def hpat_pandas_series_nsmallest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nsmallest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nsmallest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nsmallest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nsmallest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nsmallest(). Unsupported parameter. Given 'keep' != 'first'")
# mergesort is used for stable sorting of repeated values
indices = self._data.argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nsmallest_impl
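# --- Illustrative usage sketch (not part of the SDC sources) ---
# nsmallest() above argsorts the data with a stable mergesort and takes the first n
# positions. Assumes SDC is imported; the helper is illustrative only.
def _example_series_nsmallest_sketch():
    @numba.njit
    def two_smallest(s):
        return s.nsmallest(2)

    return two_smallest(pandas.Series([5.0, 1.0, 4.0, 2.0, 3.0]))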
@sdc_overload_method(SeriesType, 'nlargest')
def hpat_pandas_series_nlargest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nlargest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nlargest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nlargest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nlargest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nlargest(). Unsupported parameter. Given 'keep' != 'first'")
# data: [0, 1, -1, 1, 0] -> [1, 1, 0, 0, -1]
# index: [0, 1, 2, 3, 4] -> [1, 3, 0, 4, 2] (not [3, 1, 4, 0, 2])
# subtract 1 to ensure reverse ordering at boundaries
indices = (-self._data - 1).argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nlargest_impl
@sdc_overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@sdc_overload_method(SeriesType, 'std')
def hpat_pandas_series_std(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.std` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method std().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_std_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
var = self.var(axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only)
return var ** 0.5
return hpat_pandas_series_std_impl
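# --- Illustrative usage sketch (not part of the SDC sources) ---
# std() above is implemented as var() ** 0.5, so it inherits var()'s skipna and ddof
# handling (divisor N - ddof). Assumes SDC is imported; the helper is illustrative only.
def _example_series_std_sketch():
    @numba.njit
    def sample_std(s):
        return s.std()

    return sample_std(pandas.Series([1.0, 2.0, 3.0, 4.0]))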
@sdc_overload_attribute(SeriesType, 'values')
def hpat_pandas_series_values(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@sdc_overload_method(SeriesType, 'value_counts')
def hpat_pandas_series_value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.value_counts
Examples
--------
.. literalinclude:: ../../../examples/series/series_value_counts.py
:language: python
:lines: 27-
:caption: Getting the number of values excluding NaNs
:name: ex_series_value_counts
.. command-output:: python ./series/series_value_counts.py
:cwd: ../../../examples
.. note::
Parameter bins and dropna for Strings are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.count <pandas.Series.count>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.value_counts` implementation.
Note: Elements with the same count might appear in result in a different order than in Pandas
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_value_counts*
Parameters
-----------
self: :obj:`pandas.Series`
input series
normalize: :obj:`boolean`, default False
If True then the object returned will contain the relative frequencies of the unique values
sort: :obj: `boolean`, default True
Sort by frequencies
ascending: :obj:`boolean`, default False
Sort in ascending order
bins: :obj:`integer`, default None
*unsupported*
dropna: :obj:`boolean`, default True
Skip counts of NaN
Returns
-------
:returns :obj:`pandas.Series`
"""
_func_name = 'Method value_counts().'
ty_checker = TypeChecker('Method value_counts().')
ty_checker.check(self, SeriesType)
if not isinstance(normalize, (types.Omitted, types.Boolean, bool)) and normalize is True:
ty_checker.raise_exc(normalize, 'boolean', 'normalize')
if not isinstance(sort, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(sort, 'boolean', 'sort')
if not isinstance(ascending, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(ascending, 'boolean', 'ascending')
if not isinstance(bins, (types.Omitted, types.NoneType)) and bins is not None:
ty_checker.raise_exc(bins, 'boolean', 'bins')
if not isinstance(dropna, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(dropna, 'boolean', 'dropna')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_value_counts_str_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=types.unicode_type,
value_type=types.intp
)
nan_counts = 0
for i, value in enumerate(self._data):
if str_arr_is_na(self._data, i):
if not dropna:
nan_counts += 1
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
need_add_nan_count = not dropna and nan_counts
values = [key for key in value_counts_dict]
counts_as_list = [value_counts_dict[key] for key in value_counts_dict.keys()]
values_len = len(values)
if need_add_nan_count:
# append a separate empty string for NaN elements
values_len += 1
values.append('')
counts_as_list.append(nan_counts)
counts = numpy.asarray(counts_as_list, dtype=numpy.intp)
indexes_order = numpy.arange(values_len)
if sort:
indexes_order = counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
counts_sorted = numpy.take(counts, indexes_order)
values_sorted_by_count = [values[i] for i in indexes_order]
# allocate the result index as a StringArray and copy values to it
index_string_lengths = numpy.asarray([len(s) for s in values_sorted_by_count])
index_total_chars = numpy.sum(index_string_lengths)
result_index = pre_alloc_string_array(len(values_sorted_by_count), index_total_chars)
cp_str_list_to_array(result_index, values_sorted_by_count)
if need_add_nan_count:
# set null bit for StringArray element corresponding to NaN element (was added as last in values)
index_previous_nan_pos = values_len - 1
for i in numpy.arange(values_len):
if indexes_order[i] == index_previous_nan_pos:
str_arr_set_na(result_index, i)
break
return pandas.Series(counts_sorted, index=result_index, name=self._name)
return hpat_pandas_series_value_counts_str_impl
elif isinstance(self.dtype, types.Number):
series_dtype = self.dtype
def hpat_pandas_series_value_counts_number_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=series_dtype,
value_type=types.intp
)
zero_counts = 0
is_zero_found = False
for value in self._data:
if (dropna and numpy.isnan(value)):
continue
# Pandas hash-based value_count_float64 function doesn't distinguish between
# positive and negative zeros, hence we count zero values separately and store
# as a key the first zero value found in the Series
if not value:
zero_counts += 1
if not is_zero_found:
zero_value = value
is_zero_found = True
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
if zero_counts:
value_counts_dict[zero_value] = zero_counts
unique_values = numpy.asarray(
list(value_counts_dict),
dtype=self._data.dtype
)
value_counts = numpy.asarray(
[value_counts_dict[key] for key in value_counts_dict],
dtype=numpy.intp
)
indexes_order = numpy.arange(len(value_counts))
if sort:
indexes_order = value_counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
sorted_unique_values = numpy.take(unique_values, indexes_order)
sorted_value_counts = numpy.take(value_counts, indexes_order)
return pandas.Series(sorted_value_counts, index=sorted_unique_values, name=self._name)
return hpat_pandas_series_value_counts_number_impl
return None
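# --- Illustrative usage sketch (not part of the SDC sources) ---
# value_counts() above builds a typed dict of counts and returns them as a Series
# indexed by the unique values; as noted in the docstring, ties may be ordered
# differently than in pandas. Assumes SDC is imported; the helper is illustrative only.
def _example_series_value_counts_sketch():
    @numba.njit
    def counts(s):
        return s.value_counts()

    return counts(pandas.Series([1.0, 2.0, 1.0, 3.0, 1.0]))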
@sdc_overload_method(SeriesType, 'var')
def hpat_pandas_series_var(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.var` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method var().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_var_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
if skipna is None:
skipna = True
if skipna:
valuable_length = len(self._data) - numpy.sum(numpy.isnan(self._data))
if valuable_length <= ddof:
return numpy.nan
return numpy.nanvar(self._data) * valuable_length / (valuable_length - ddof)
if len(self._data) <= ddof:
return numpy.nan
return self._data.var() * len(self._data) / (len(self._data) - ddof)
return hpat_pandas_series_var_impl
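# Illustrative note (not part of the SDC sources): numpy.nanvar() and ndarray.var()
# return the population variance (ddof=0), so the implementation above rescales by
# N / (N - ddof) to obtain the ddof-adjusted estimate, i.e.
#     var_ddof = var_population * N / (N - ddof)
# and returns NaN when N <= ddof, matching pandas' behaviour for too-short inputs.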
@sdc_overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index1
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_index_none_impl(self):
return numpy.arange(len(self._data))
return hpat_pandas_series_index_none_impl
else:
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
@sdc_overload_method(SeriesType, 'rolling')
def hpat_pandas_series_rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.rolling
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_series_rolling
.. command-output:: python ./series/rolling/series_rolling_min.py
:cwd: ../../../examples
.. todo:: Add support of parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed``
.. seealso::
:ref:`expanding <pandas.Series.expanding>`
Provides expanding transformations.
:ref:`ewm <pandas.Series.ewm>`
Provides exponential weighted functions.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.rolling` implementation
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling
Parameters
----------
series: :obj:`pandas.Series`
Input Series.
window: :obj:`int` or :obj:`offset`
Size of the moving window.
min_periods: :obj:`int`
Minimum number of observations in window required to have a value.
center: :obj:`bool`
Set the labels at the center of the window.
*unsupported*
win_type: :obj:`str`
Provide a window type.
*unsupported*
on: :obj:`str`
Column on which to calculate the rolling window.
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
closed: :obj:`str`
Make the interval closed on the ‘right’, ‘left’, ‘both’ or ‘neither’ endpoints.
*unsupported*
Returns
-------
:class:`pandas.Series.rolling`
Output class to manipulate with input data.
"""
ty_checker = TypeChecker('Method rolling().')
ty_checker.check(self, SeriesType)
if not isinstance(window, types.Integer):
ty_checker.raise_exc(window, 'int', 'window')
minp_accepted = (types.Omitted, types.NoneType, types.Integer)
if not isinstance(min_periods, minp_accepted) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'None, int', 'min_periods')
center_accepted = (types.Omitted, types.Boolean)
if not isinstance(center, center_accepted) and center is not False:
ty_checker.raise_exc(center, 'bool', 'center')
str_types = (types.Omitted, types.NoneType, types.StringLiteral, types.UnicodeType)
if not isinstance(win_type, str_types) and win_type is not None:
ty_checker.raise_exc(win_type, 'str', 'win_type')
if not isinstance(on, str_types) and on is not None:
ty_checker.raise_exc(on, 'str', 'on')
axis_accepted = (types.Omitted, types.Integer, types.StringLiteral, types.UnicodeType)
if not isinstance(axis, axis_accepted) and axis != 0:
ty_checker.raise_exc(axis, 'int, str', 'axis')
if not isinstance(closed, str_types) and closed is not None:
ty_checker.raise_exc(closed, 'str', 'closed')
nan_minp = isinstance(min_periods, (types.Omitted, types.NoneType)) or min_periods is None
def hpat_pandas_series_rolling_impl(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
if window < 0:
raise ValueError('window must be non-negative')
if nan_minp == True: # noqa
minp = window
else:
minp = min_periods
if minp < 0:
raise ValueError('min_periods must be >= 0')
if minp > window:
raise ValueError('min_periods must be <= window')
if center != False: # noqa
raise ValueError('Method rolling(). The object center\n expected: False')
if win_type is not None:
raise ValueError('Method rolling(). The object win_type\n expected: None')
if on is not None:
raise ValueError('Method rolling(). The object on\n expected: None')
if axis != 0:
raise ValueError('Method rolling(). The object axis\n expected: 0')
if closed is not None:
raise ValueError('Method rolling(). The object closed\n expected: None')
return _hpat_pandas_series_rolling_init(self, window, minp, center,
win_type, on, axis, closed)
return hpat_pandas_series_rolling_impl
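# --- Illustrative usage sketch (not part of the SDC sources) ---
# rolling() above only validates its arguments and constructs the rolling object; the
# window aggregations themselves are implemented elsewhere in SDC. Assumes SDC is
# imported; the helper is illustrative only.
def _example_series_rolling_min_sketch():
    @numba.njit
    def rolling_min(s):
        return s.rolling(3).min()

    return rolling_min(pandas.Series([4.0, 3.0, 5.0, 2.0, 6.0]))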
@sdc_overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@sdc_overload_attribute(SeriesType, 'str')
def hpat_pandas_series_str(self):
"""
Pandas Series attribute :attr:`pandas.Series.str` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.core.strings.StringMethods`
Output class to manipulate with input data.
"""
_func_name = 'Attribute str.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.List, types.UnicodeType)):
msg = '{} Can only use .str accessor with string values. Given: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
def hpat_pandas_series_str_impl(self):
return pandas.core.strings.StringMethods(self)
return hpat_pandas_series_str_impl
@sdc_overload_attribute(SeriesType, 'ndim')
def hpat_pandas_series_ndim(self):
"""
Pandas Series attribute :attr:`pandas.Series.ndim` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_ndim
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`int`
Number of dimensions of the underlying data, by definition 1
"""
_func_name = 'Attribute ndim.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_ndim_impl(self):
return 1
return hpat_pandas_series_ndim_impl
@sdc_overload_attribute(SeriesType, 'T')
def hpat_pandas_series_T(self):
"""
Pandas Series attribute :attr:`pandas.Series.T` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_T
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`numpy.ndarray`
An array representing the underlying data
"""
_func_name = 'Attribute T.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_T_impl(self):
return self._data
return hpat_pandas_series_T_impl
@sdc_overload(len)
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl
@sdc_overload_method(SeriesType, 'astype')
def hpat_pandas_series_astype(self, dtype, copy=True, errors='raise'):
"""
Pandas Series method :meth:`pandas.Series.astype` implementation.
Cast a pandas object to a specified dtype dtype
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_astype*
Parameters
-----------
dtype : :obj:`numpy.dtype` or :obj:`dict`
Use a numpy.dtype or Python type to cast entire pandas object to the same type.
Alternatively, use {col: dtype, …}, where col is a column label and dtype is a numpy.dtype
or Python type to cast one or more of the DataFrame’s columns to column-specific types.
copy : :obj:`bool`, default :obj:`True`
Return a copy when True
Currently copy=False is not supported
errors : :obj:`str`, default :obj:`'raise'`
Control raising of exceptions on invalid data for provided dtype.
* raise : allow exceptions to be raised
* ignore : suppress exceptions. On error return original object
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` Cast a :obj:`pandas.Series` to a specified dtype dtype
"""
_func_name = 'Method astype().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(copy, (types.Omitted, bool, types.Boolean)):
raise TypingError('{} The object must be a boolean. Given copy: {}'.format(_func_name, copy))
if (not isinstance(errors, (types.Omitted, str, types.UnicodeType, types.StringLiteral)) and
errors in ('raise', 'ignore')):
raise TypingError('{} The object must be a string literal. Given errors: {}'.format(_func_name, errors))
# Return StringArray for astype(str) or astype('str')
def hpat_pandas_series_astype_to_str_impl(self, dtype, copy=True, errors='raise'):
num_chars = 0
arr_len = len(self._data)
# Get total chars for new array
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
num_chars += len(str(item)) # TODO: check NA
data = sdc.str_arr_ext.pre_alloc_string_array(arr_len, num_chars)
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
data[i] = str(item) # TODO: check NA
return pandas.Series(data, self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.functions.NumberClass), example - astype(np.int64)
def hpat_pandas_series_astype_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(dtype), self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.StringLiteral), example - astype('int64')
def hpat_pandas_series_astype_literal_type_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(numpy.dtype(dtype)), self._index, self._name)
# Return self
def hpat_pandas_series_astype_no_modify_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data, self._index, self._name)
if ((isinstance(dtype, types.Function) and dtype.typing_key == str)
or (isinstance(dtype, types.StringLiteral) and dtype.literal_value == 'str')):
return hpat_pandas_series_astype_to_str_impl
# Needs Numba astype impl support converting unicode_type to NumberClass and other types
if isinstance(self.data, StringArrayType):
if isinstance(dtype, types.functions.NumberClass) and errors == 'raise':
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype}')
if isinstance(dtype, types.StringLiteral) and errors == 'raise':
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype.literal_value}')
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.functions.NumberClass):
return hpat_pandas_series_astype_numba_impl
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.StringLiteral):
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
return hpat_pandas_series_astype_literal_type_numba_impl
# Raise error if dtype is not supported
if errors == 'raise':
raise TypingError(f'{_func_name} The object must be a supported type. Given dtype: {dtype}')
else:
return hpat_pandas_series_astype_no_modify_impl
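# --- Illustrative usage sketch (not part of the SDC sources) ---
# astype() above selects one of several implementations depending on the target dtype;
# passing a NumPy scalar type takes the numeric branch. Assumes SDC is imported; the
# helper is illustrative only.
def _example_series_astype_sketch():
    @numba.njit
    def to_float(s):
        return s.astype(numpy.float64)

    return to_float(pandas.Series(numpy.arange(5)))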
@sdc_overload_method(SeriesType, 'shift')
def hpat_pandas_series_shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Pandas Series method :meth:`pandas.Series.shift` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_fill_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
periods: :obj:`int`
Number of periods to shift. Can be positive or negative.
freq: :obj:`DateOffset`, :obj:`tseries.offsets`, :obj:`timedelta`, :obj:`str`
Offset to use from the tseries module or time rule (e.g. ‘EOM’).
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
fill_value : :obj:`int`, :obj:`float`
The scalar value to use for newly introduced missing values.
Returns
-------
:obj:`scalar`
returns :obj:`series` object
"""
_func_name = 'Method shift().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
raise TypingError('{} The object must be a number. Given fill_value: {}'.format(_func_name, fill_value))
if not isinstance(freq, (types.Omitted, types.NoneType)) and freq is not None:
raise TypingError('{} Unsupported parameters. Given freq: {}'.format(_func_name, freq))
if not isinstance(axis, (types.Omitted, int, types.Integer)) and not axis:
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
fill_is_default = isinstance(fill_value, (types.Omitted, types.NoneType)) or fill_value is None
series_np_dtype = [numpy_support.as_dtype(self.data.dtype)]
fill_np_dtype = [numpy.float64 if fill_is_default else numpy_support.as_dtype(fill_value)]
fill_dtype = types.float64 if fill_is_default else fill_value
common_dtype = find_common_dtype_from_numpy_dtypes([], [self.data.dtype, fill_dtype])
if fill_is_default:
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(shape=len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = numpy.nan
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = numpy.nan
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = fill_value
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = fill_value
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
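# --- Illustrative usage sketch (not part of the SDC sources) ---
# shift() above fills the vacated positions with NaN (or with fill_value when one is
# given), which is why the default branch allocates a float result array. Assumes SDC
# is imported; the helper is illustrative only.
def _example_series_shift_sketch():
    @numba.njit
    def shift_one(s):
        return s.shift(1)

    return shift_one(pandas.Series([1.0, 2.0, 3.0]))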
@sdc_overload_method(SeriesType, 'isin')
def hpat_pandas_series_isin(self, values):
"""
Pandas Series method :meth:`pandas.Series.isin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isin_list1
Parameters
-----------
values : :obj:`list` or :obj:`set` object
specifies values to look for in the series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object indicating if each element of self is in values
"""
_func_name = 'Method isin().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(values, (types.Set, types.List)):
raise TypingError(
'{} The argument must be set or list-like object. Given values: {}'.format(_func_name, values))
def hpat_pandas_series_isin_impl(self, values):
# TODO: replace with below line when Numba supports np.isin in nopython mode
# return pandas.Series(np.isin(self._data, values))
return pandas.Series(data=[(x in values) for x in self._data], index=self._index, name=self._name)
return hpat_pandas_series_isin_impl
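# --- Illustrative usage sketch (not part of the SDC sources) ---
# isin() above falls back to a membership-test comprehension until numpy.isin is
# available in nopython mode. Assumes SDC is imported; the helper is illustrative only.
def _example_series_isin_sketch():
    @numba.njit
    def flag_values(s):
        return s.isin([2.0, 4.0])

    return flag_values(pandas.Series([1.0, 2.0, 3.0, 4.0]))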
@sdc_overload_method(SeriesType, 'append')
def hpat_pandas_series_append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Pandas Series method :meth:`pandas.Series.append` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_append*
Parameters
-----------
self: :obj:`pandas.Series`
input series
to_append : :obj:`pandas.Series` object or :obj:`list` or :obj:`set`
Series (or list or tuple of Series) to append with self
ignore_index: :obj:`bool`, default False
If True, do not use the index labels.
Supported as literal value only
verify_integrity: :obj:`bool`, default False
If True, raise Exception on creating index with duplicates.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
Concatenated Series
"""
_func_name = 'Method append().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not (isinstance(to_append, SeriesType)
or (isinstance(to_append, (types.UniTuple, types.List)) and isinstance(to_append.dtype, SeriesType))):
raise TypingError(
'{} The argument must be a pandas.series or list/tuple of pandas.series. \
Given to_append: {}'.format(_func_name, to_append))
# currently we will always raise this in the end, i.e. if no impl was found
# TODO: find a way to stop compilation early and not proceed with unliteral step
if not (isinstance(ignore_index, types.Literal) and isinstance(ignore_index, types.Boolean)
or isinstance(ignore_index, types.Omitted)
or ignore_index is False):
raise TypingError(
'{} The ignore_index must be a literal Boolean constant. Given: {}'.format(_func_name, ignore_index))
if not (verify_integrity is False or isinstance(verify_integrity, types.Omitted)):
raise TypingError(
'{} Unsupported parameters. Given verify_integrity: {}'.format(_func_name, verify_integrity))
# ignore_index value has to be known at compile time to select between implementations with different signatures
ignore_index_is_false = (common_functions.has_literal_value(ignore_index, False)
or common_functions.has_python_value(ignore_index, False)
or isinstance(ignore_index, types.Omitted))
to_append_is_series = isinstance(to_append, SeriesType)
if ignore_index_is_false:
def hpat_pandas_series_append_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
new_index = common_functions.hpat_arrays_append(self.index, to_append.index)
else:
data_arrays_to_append = [series._data for series in to_append]
index_arrays_to_append = [series.index for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, data_arrays_to_append)
new_index = common_functions.hpat_arrays_append(self.index, index_arrays_to_append)
            return pandas.Series(new_data, new_index)
        return hpat_pandas_series_append_impl
import config
import pandas as pd
path_to_dir = config.path_file_name['path_result']
path_to_file_ori = config.path_file_name['label_raw']
path_to_file_dest = config.path_file_name['label_process']
def pre_process_label(path_to_dir, path_to_file_ori, path_to_file_dest):
out_data = []
    pre_df = pd.read_csv(path_to_dir + path_to_file_ori)
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 15:09:59 2016
@author: MichaelEK
"""
import numpy as np
import pandas as pd
import os
import geopandas as gpd
import xarray as xr
from niwa import rd_niwa_vcsn
from pdsql import mssql
from hydrointerp import interp2d
from gistools import vector
import seaborn as sns
import statsmodels as sm
from hydrolm import LM
#from import_fun import rd_vcn
#from ts_stats_fun import w_resample, flow_stats
#from hydro_plot_fun import mon_boxplot, dual_mon_boxplot, multi_yr_barplot, reg_plot
pd.options.display.max_columns = 10
######################################
### Parameters
py_path = os.path.realpath(os.path.dirname(__file__))
base_path = os.path.split(py_path)[0]
shp_dir = 'input_files'
output_dir = 'output_files'
rec_streams_shp = 'rec_streams_pareora.shp'
catch_del_shp = 'pareora_catchments.shp'
catch_shp = 'catchment_pareora.shp'
rec_streams_shp_path = os.path.join(base_path, shp_dir, rec_streams_shp)
catch_del_shp_path = os.path.join(base_path, shp_dir, catch_del_shp)
catch_shp_path = os.path.join(base_path, shp_dir, catch_shp)
server = 'edwprod01'
database = 'hydro'
sites_table = 'ExternalSite'
ts_summ_table = 'TSDataNumericDailySumm'
ts_table = 'TSDataNumericDaily'
#rec_sites = ['70105', '70103']
rec_site = '70105'
mtypes = ['precip', 'PET' ]
from_date = '1982-07-01'
to_date = '2015-06-30'
flow_csv = 'pareora_huts_flow.csv'
usage_csv = 'pareora_huts_usage_mon.csv'
sites = ['403711', '405711', '404810', '405910', '414110', '405610', '403601', '417110', '407810', '400910']
#####################################
### Create dataframe from many VCN csv files
both1 = rd_niwa_vcsn(mtypes, catch_shp_path, buffer_dis=10000)
## Aggregate by month
both2 = both1.groupby(['x', 'y', pd.Grouper(key='time', freq='M')]).sum()
both3 = both2.loc[(slice(None), slice(None), slice(from_date, to_date)), :].copy()
## Resample in 2D
pe1 = interp2d.points_to_grid(both3.reset_index(), 'time', 'x', 'y', 'pe', 1000, 4326, 2193)
rain1 = interp2d.points_to_grid(both3.reset_index(), 'time', 'x', 'y', 'rain', 1000, 4326, 2193)
pe2 = pe1.to_dataframe().dropna()
rain2 = rain1.to_dataframe().dropna()
## Combine datasets
both4 = pd.concat([rain2, pe2], axis=1).reset_index()
## Aggregate by catchment
both5 = vector.xy_to_gpd(['time', 'rain', 'pe'], 'x', 'y', both4)
#pts0 = both4[both4.time == '1982-07-31'].copy()
#pts0.index.name = 'index'
#pts1 = vector.xy_to_gpd(pts0.index, 'x', 'y', pts0)
catch_del = gpd.read_file(catch_del_shp_path)
catch_del.rename(columns={'SITENUMBER': 'site'}, inplace=True)
pts2, poly1 = vector.pts_poly_join(both5, catch_del, 'site')
catch_agg1 = pts2.groupby(['site', 'time'])[['rain', 'pe']].mean()
## Adjust the vcsn according to the precip gauge 404810
ts1 = mssql.rd_sql_ts(server, database, ts_table, 'ExtSiteID', 'DateTime', 'Value', where_in={'ExtSiteID': ['404810'],'DatasetTypeID': [15]}, from_date='2005-07-01', to_date=to_date)
ts1 = ts1.droplevel(0)
rain3 = catch_agg1.loc[70103, 'rain']
gauge1 = ts1.resample('A-JUN').sum().Value * 1.07
gauge1.name = '404810'
ols1 = LM(rain3.to_frame(), gauge1.to_frame()).predict()
ols_summ = ols1.summary_df.copy()
rain4 = (rain3 * float(ols_summ['x slopes'][0]) + ols_summ['y intercept'][0]).reset_index()
rain4['site'] = 70105
rain4.set_index(['site', 'time'], inplace=True)
## Combine with flow
#flow1 = mssql.rd_sql_ts(server, database, ts_table, 'ExtSiteID', 'DateTime', 'Value', where_in={'DatasetTypeID': [5], 'ExtSiteID': rec_sites}, from_date=from_date, to_date=to_date).reset_index()
#flow1.rename(columns={'ExtSiteID': 'site', 'DateTime': 'time', 'Value': 'flow'}, inplace=True)
#flow1.to_csv(os.path.join(base_path, shp_dir, flow_output), index=False)
flow1 = pd.read_csv(os.path.join(base_path, shp_dir, flow_csv), parse_dates=['time'], infer_datetime_format=True)
flow1['site'] = flow1['site'].astype(str)
flow1 = flow1[flow1.site == rec_site]
flow1['flow'] = flow1['flow'] * 60*60*24
flow2 = flow1.groupby(['site', pd.Grouper(key='time', freq='A-JUN')]).sum().reset_index()
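# Quick illustration (toy values, not the Pareora data) of the 'A-JUN' grouper used
# above: annual bins anchored at the end of June, i.e. July-June water years.
_wy_demo = pd.DataFrame({'time': pd.to_datetime(['2014-08-15', '2015-05-01']), 'flow': [1.0, 2.0]})
_wy_demo_sum = _wy_demo.groupby(pd.Grouper(key='time', freq='A-JUN')).sum()  # both rows fall in the year ending 2015-06-30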
flow2['site'] = pd.to_numeric(flow2['site'])
## Add Usage
usage1 = pd.read_csv(os.path.join(base_path, shp_dir, usage_csv), parse_dates=['time'], infer_datetime_format=True)
usage1 = usage1[(usage1.time >= from_date) & (usage1.time <= to_date)]
usage2 = usage1.groupby('time')['sd_usage'].sum().reset_index()
usage2['site'] = 70105
flow2a = pd.merge(flow2, usage2, on=['site', 'time'])
flow2a['flow'] = flow2a['flow'] + flow2a['sd_usage']
## Normalise to area
poly1['area'] = poly1.area
poly2 = poly1.drop('geometry', axis=1).copy()
flow3 = pd.merge(flow2a, poly2, on='site').drop('sd_usage', axis=1)
flow3['flow_mm'] = flow3['flow'] / flow3['area'] * 1000
flow4 = flow3.drop(['flow', 'area'], axis=1).set_index(['site', 'time']).copy()
catch_agg2 = pd.concat([rain4, flow4, catch_agg1['pe']], axis=1)
## Calc AET
catch_agg2['AET'] = catch_agg2['rain'] - catch_agg2['flow_mm']
## Testing
t1 = catch_agg2.reset_index()
t2 = t1[t1.site == 70105].drop('site', axis=1).copy()
t2.set_index('time', inplace=True)
t3 = t2.resample('A-JUN').mean()
sns.regplot('AET', 'pe', data=t2)
ts1 = mssql.rd_sql(server, database, ts_table, ['DateTime', 'Value'], where_in={'ExtSiteID': ['404810'],'DatasetTypeID': [15]})
ts1['DateTime'] = pd.to_datetime(ts1['DateTime'])
ts1.set_index('DateTime', inplace=True)
day_mean1 = rain2.groupby(level='time').mean()
sum1 = day_mean1.resample('M').sum()['2005-07-01':'2015-06-30']
sum4 = ts1.resample('M').sum()['2005-07-01':'2015-06-30']
sns.regplot(sum1.rain.apply(np.log), sum4.Value.apply(np.log))
sns.regplot(sum1.rain, sum4.Value)
sum1b = day_mean1.resample('A-JUN').sum()['2005-07-01':'2015-06-30']
sum1b.columns = ['vcsn']
sum4b = ts1.resample('A-JUN').sum()['2005-07-01':'2015-06-30']
sum4b.columns = ['404810']
sns.regplot(sum1b['vcsn'], sum4b['404810'])
### Comparisons of VCSN to stations
pts0 = mssql.rd_sql(server, database, sites_table, ['ExtSiteID', 'NZTMX', 'NZTMY'], where_in={'ExtSiteID': sites}, rename_cols=['site', 'x', 'y']).astype(int)
pts1 = interp2d.points_to_points(both3.reset_index(), 'time', 'x', 'y', 'rain', pts0, 4326, 2193).reset_index().dropna()
pts1['x'] = pts1['x'].round().astype(int)
pts1['y'] = pts1['y'].round().astype(int)
pts2 = pd.merge(pts0, pts1, on=['x', 'y'])
import numpy
import matplotlib.pyplot as plt
import tellurium as te
from rrplugins import Plugin
auto = Plugin("tel_auto2000")
from te_bifurcation import model2te, run_bf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
sf = ScalarFormatter()
sf.set_scientific(False)
import re
import seaborn as sns
import os
from pickle import dump, load
from sympy import *
import lhsmdu
import sobol_seq
import pickle
# Define symbolic variables for symbolic Jacobian
R, r, C1, C2, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, sR, a1, a2, b1, b2, A = symbols('R r C1 C2 mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k s_R a1 a2 b1 b2 A', positive=True, real=True)
c1A, c1B, c2, rev, koff, kR, sR0, sR, g, s, C = symbols('c1A c1B c2 rev koff kR sR0 sR g s C', positive=True, real=True)
R, r, C, mR1, mR2, K, K1, K2, m, a, b, sR, ksi, ksm, ki0, ki1, km0, km1, k, kR, A = \
symbols('R r C mR1 mR2 K K1 K2 m a b sR ksi ksm ki0 ki1 km0 km1 k k_R A', positive=True, real=True)
# Samples of parameter values
n = int(1E2) # Production run 1E5
ss = sobol_seq.i4_sobol_generate(4, n)
l = np.power(2, -3 + (4+3)*ss[:,:2])
a1sp, b1sp = l[:,0], l[:,1]
Ksp = 10**(ss[:,-2]*(np.log10(70000)-np.log10(7)) + np.log10(7))
gsp = 10**(ss[:,-1]*(np.log10(2)-np.log10(0.02)) + np.log10(0.02))
# Model definition
model_mmi1_full = {
'pars': {
'sR': 0.0,
'a1' : 1,
'b1' : 1,
'sR0': 0.0,
'g': 1.0,
'K' : 10000,
'koff': 100,
},
'vars': {
'r': '1 - koff*K*R*r + koff*C - g*r + a1*C',
'R': 'sR0 + sR - koff*K*R*r + koff*C - R + b1*g*C',
'C': 'koff*K*R*r - koff*C - a1*C - b1*g*C',
},
'fns': {}, 'aux': [], 'name': 'mmi1_full'}
ics_1_mmi1_full = {'r': 0.9, 'R': 0.0, 'C': 0.0}
# Symbolic Jacobian
eqnD = {}
for k, v in model_mmi1_full['vars'].items():
eqnD[k] = parsing.sympy_parser.parse_expr(v, locals())
JnD = Matrix([eqnD['R'], eqnD['r'], eqnD['C']]).jacobian(Matrix([R, r, C]))
fJnD = lambdify((K, R, r, C, a1, b1, g, koff), JnD, 'numpy')
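# Small illustration (assumed toy state values, not from the sampled parameter sets):
# evaluating the lambdified Jacobian at one point and inspecting its eigenvalues is
# the same stability test applied to whole continuation branches further below.
_J_demo = np.array(fJnD(1000.0, 0.1, 0.5, 0.05, 1.0, 1.0, 1.0, 100.0), dtype=float)
_w_demo = np.linalg.eigvals(_J_demo)
# a complex-conjugate pair with negative real parts would indicate a spiral sink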
# Tellurium object
r = model2te(model_mmi1_full, ics=ics_1_mmi1_full)
uplim = 120
if 1:
# A new run
hb_cts, hbi, hbnds = 0, [], []
data_all = []
inuerr = []
for i in range(n):
print(i)
for j, p in enumerate(['a1', 'b1']):
r[p] = l[i,j]
r['g'], r['K'] = gsp[i], Ksp[i]
data, bounds, boundsh = run_bf(r, auto, dirc="+", par="sR", lims=[0,uplim],
ds=1E-2, dsmin=1E-5, dsmax=0.1)
if data.r.iloc[-1] < -1:
data, bounds, boundsh = run_bf(r, auto, dirc="+", par="sR", lims=[0,uplim],
ds=1E-2, dsmin=1E-5, dsmax=0.01)
data_all.append(data)
if len(boundsh) > 0:
print('HB point found')
hb_cts += 1
hbi.append(i)
hbnds.append(boundsh)
if 1: # Save the output
fn = './te_data/bf_data_MMI1.tebf'
specs = {'model':model_mmi1_full, 'n':n, 'uplim':uplim, 'Ksp':Ksp,
'gsp':gsp,
'a1sp':a1sp, 'b1sp':b1sp }
with open(fn, 'wb') as f:
pickle.dump({'data_all': data_all, 'specs': specs}, f)
print('Sets with HB: ', hb_cts)
print('Numerical errors', len(inuerr))
else:
# Reading a single file
fn = './te_data/bf_data_MMI1.tebf'
print('Reading', fn)
with open(fn, 'rb') as f:
f_cont = pickle.load(f)
data_all, specs = f_cont['data_all'], f_cont['specs']
n, uplim, Ksp, gsp = specs['n'], specs['uplim'], specs['Ksp'], specs['gsp']
a1sp, b1sp = specs['a1sp'], specs['b1sp']
print('Curves: '+str(n)+'\t','uplim: '+str(uplim))
for sp in ['Ksp', 'gsp', 'a1sp', 'b1sp']:
print(sp + ' is between %.4f and %.4f'%(specs[sp].min(), specs[sp].max()))
print('\n')
# More detailed analysis of the continuation output
oui = [] # Spiral sinks
hbi = [] # Hopf
mxi = [] # Hopf and SN
inuerr = []
binned_Rs = []
binned_Rts = []
binned_cons = []
hist_imag = np.zeros(60)
nR = 62
do_pars = []
for i, data in enumerate(data_all[:]):
if ((i+1) % 10000) == 0:
print(i+1)
if len(data) == 0:
inuerr.append(i)
continue
if data.PAR.iloc[-1] < (uplim-1) or data.PAR.iloc[-1] > (uplim+1):
mxi.append(i)
if (data.TY == 3).sum()>0:
hbi.append(i)
Rsp, rsp, Csp = data.R.values, data.r.values, data.C.values
JnDsp = fJnD(Ksp[i], Rsp, rsp, Csp, a1sp[i], b1sp[i], gsp[i], 100.0)
Jsp = np.zeros((JnDsp.shape[0], JnDsp.shape[0], Rsp.shape[0]))
for p in range(JnDsp.shape[0]):
for q in range(JnDsp.shape[1]):
Jsp[p,q,:] = JnDsp[p,q]
Jsp = np.swapaxes(np.swapaxes(Jsp, 0, 2), 1,2)
w, v = np.linalg.eig(Jsp)
#print(w)
if_imag = np.imag(w) != 0
imags = ((if_imag).sum(axis=1)>0) & (Rsp>-10) & (rsp>-10)
igt = np.where(Rsp>0.01)[0]
if (len(igt) > 0):
sRthr = data.PAR[igt[0]]
std_sigs = np.linspace(sRthr*0.0, sRthr*3.1, nR)
ids = np.searchsorted(data.PAR, std_sigs)
binned_R, binned_Rt = np.empty(nR), np.empty(nR)
binned_R[:], binned_Rt[:] = np.NaN, np.NaN
R_data = Rsp[[x for x in ids if x < Rsp.size]]
Rt_data = R_data + Csp[[x for x in ids if x < Rsp.size]]
binned_R[:R_data.size] = R_data
binned_Rt[:R_data.size] = Rt_data
binned_Rs.append(binned_R)
binned_Rts.append(binned_Rt)
binned_cons.append(std_sigs)
if imags.sum() > 0:
if (a1sp[i]>1 and b1sp[i]>1) or (a1sp[i]<1 and b1sp[i]<1):
continue
rmax, imax = np.real(w).min(axis=1), np.imag(w).max(axis=1)
oui.append(i)
imagi = np.where(imags>0)[0]
if len(igt) > 0:
hs, bins = np.histogram(data.PAR[imagi], bins=np.linspace(sRthr*0.0, sRthr*3.0, hist_imag.size+1))
hist_imag = hist_imag + ((hs>0)+0)
fig, ax = plt.subplots(figsize=(3,3))
fig.subplots_adjust(bottom=0.2, right=0.78, left=0.15)
ax2 = ax.twinx()
ax2.bar(range(hist_imag.size), hist_imag/n, color='y', zorder=-10, width=1.0, alpha=0.5)
dfl = pd.DataFrame(binned_Rs).melt()
sns.lineplot(x="variable", y="value", data=dfl, color='k', ax=ax, ci=99.9, palette="flare")
ax.set_ylabel(r'Steady state $\it{R}$ (A.U.)')
ax.set_xlabel(r'$\sigma_R$')
ax.set_xticks([0, 20, 40, 60])
ltr = r'$\hat{\it{\sigma_R}}$'
ax.set_xticklabels([0, ltr, r'2$\times$'+ltr, r'3$\times$'+ltr])
ax.set_xlim(0, 40)
ax.spines['right'].set_color('y')
ax2.spines['right'].set_color('y')
ax2.yaxis.label.set_color('y')
ax2.tick_params(axis='y', colors='y')
ax2.set_ylabel(r'Frequency (spiral sink)')
plt.show()
figc, axc = plt.subplots(figsize=(4,3))
figc.subplots_adjust(bottom=0.2, right=0.90, left=0.25)
sns.lineplot(x="variable", y="value", data=dfl, color='k', ax=axc, ci=99.9, palette="flare", label=r'$\it{R}$')
dft = pd.DataFrame(binned_Rts).melt()
sns.lineplot(x="variable", y="value", data=dft, color='m', ax=axc, ci=99.9, palette="flare", label=r'$\it{R}\rm{_T}$')
dfc = pd.DataFrame(binned_cons)
import streamlit as st
col1, col2 = st.beta_columns((2,1))
col1.write("""# Análise de sentimento""")
col2.image('a3.png', width =60)
st.write("""Utilizando modelos de processamento de linguagem natural e dados de texto do Twitter elaboramos uma aplicação que dada uma palavara de interesse é possível medir o sentimento atual relativo à aquela palavra.""")
import tweepy as tw
from leia import SentimentIntensityAnalyzer
import pandas as pd
import seaborn as sns
def get_sentiment(score):
if score['compound'] > 0.05:
score['sentiment'] = 'Positivo'
elif score['compound'] < -0.05:
score['sentiment'] = 'Negativo'
else:
score['sentiment'] = 'Neutro'
def get_probabilities(scores):
pos_count = 0
neg_count = 0
neu_count = 0
total_score = 0
for score in scores:
total_score+=score['compound']
if score['sentiment'] == 'Positivo':
pos_count+=1
elif score['sentiment'] == 'Negativo':
neg_count+=1
else:
neu_count+=1
return pos_count/len(scores), neg_count/len(scores), neu_count/len(scores), total_score/len(scores)
def get_probabilities_(scores):
pos_count = 0
neg_count = 0
neu_count = 0
total_score = 0
for score in scores.iloc:
total_score+=score['compound']
if score['sentiment'] == 'Positivo':
pos_count+=1
elif score['sentiment'] == 'Negativo':
neg_count+=1
else:
neu_count+=1
return pos_count/len(scores), neg_count/len(scores), neu_count/len(scores), total_score/len(scores)
def f(x):
if x == 'positive':
return 'Positivo'
if x == 'negative':
return 'Negativo'
if x == 'neutral':
return 'Neutro'
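# Tiny illustration of the threshold rule in get_sentiment (hypothetical score, not a real tweet):
_demo_score = {'compound': 0.42}
get_sentiment(_demo_score)  # adds _demo_score['sentiment'] == 'Positivo', since 0.42 > 0.05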
s = SentimentIntensityAnalyzer()
word = st.text_input('Digite uma palavra')
if word:
consumer_key = 'F5ucChFt93smoSFH8UGA9Qxph'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'
auth = tw.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tw.API(auth,wait_on_rate_limit=True)
public_tweets = api.home_timeline()
tweets = tw.Cursor(api.search,q=word).items(1000)
scores = []
with st.spinner(text='Carregando dados do Twitter...'):
for tweet in tweets:
if tweet.metadata['iso_language_code'] == 'pt':
#print(tweet.text)
data = s.polarity_scores(tweet.text)
data['text'] = tweet.text
scores.append(data)
st.success('Done')
for score in scores:
get_sentiment(score)
pos,neg,neu,avg = get_probabilities(scores)
data = pd.DataFrame(scores)
import os
import json
import pickle
from sys import getsizeof
from memory_profiler import profile #, memory_usage
from pprint import pprint
from pandas import DataFrame
from networkx import write_gpickle, read_gpickle
from dotenv import load_dotenv
from conftest import compile_mock_rt_graph
from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.gcs_service import GoogleCloudStorageService
from conftest import compile_mock_rt_graph
load_dotenv()
DIRPATH = os.getenv("DIRPATH", default="graphs/mock_graph")
DRY_RUN = (os.getenv("DRY_RUN", default="false") == "true")
WIFI_ENABLED = (os.getenv("WIFI_ENABLED", default="true") == "true")
class GraphStorage:
def __init__(self, dirpath=None, gcs_service=None):
"""
Saves and loads artifacts from the networkx graph compilation process, using local storage and/or Google Cloud Storage.
Params:
dirpath (str) like "graphs/my_graph/123"
TODO: bot probability stuff only applies to bot retweet graphs, and should probably be moved into a child graph storage class
"""
self.gcs_service = gcs_service or GoogleCloudStorageService()
self.dirpath = dirpath or DIRPATH
self.gcs_dirpath = os.path.join("storage", "data", self.dirpath)
self.local_dirpath = os.path.join(DATA_DIR, self.dirpath) # TODO: to make compatible on windows, split the dirpath on "/" and re-join using os.sep
print("-------------------------")
print("GRAPH STORAGE...")
print(" DIRPATH:", self.dirpath)
print(" GCS DIRPATH:", self.gcs_dirpath)
print(" LOCAL DIRPATH:", os.path.abspath(self.local_dirpath))
print(" WIFI ENABLED:", WIFI_ENABLED)
seek_confirmation()
if not os.path.exists(self.local_dirpath):
os.makedirs(self.local_dirpath)
self.results = None
self.graph = None
@property
def metadata(self):
return {
"dirpath": self.dirpath,
#"local_dirpath": os.path.abspath(self.local_dirpath),
#"gcs_dirpath": self.gcs_dirpath,
"gcs_service": self.gcs_service.metadata,
"wifi_enabled": WIFI_ENABLED
}
#
# LOCAL STORAGE
#
@property
def local_metadata_filepath(self):
return os.path.join(self.local_dirpath, "metadata.json")
@property
def local_results_filepath(self):
return os.path.join(self.local_dirpath, "results.csv")
@property
def local_graph_filepath(self):
return os.path.join(self.local_dirpath, "graph.gpickle")
@property
def local_bot_probabilities_filepath(self):
return os.path.join(self.local_dirpath, "bot_probabilities.csv")
@property
def local_bot_probabilities_histogram_filepath(self):
return os.path.join(self.local_dirpath, "bot_probabilities_histogram.png")
def write_metadata_to_file(self):
print(logstamp(), "WRITING METADATA...")
with open(self.local_metadata_filepath, "w") as f:
json.dump(self.metadata, f)
def write_results_to_file(self):
print(logstamp(), "WRITING RESULTS...")
df = DataFrame(self.results)
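# The method body is truncated at this point in the extract; a plausible final step
# (an assumption, not necessarily the original code) is to persist the dataframe locally:
df.to_csv(self.local_results_filepath, index=False)
# Hypothetical usage sketch of GraphStorage (dirpath and results below are illustrative only):
# storage = GraphStorage(dirpath="graphs/mock_graph/example")
# storage.results = [{"user_id": 1, "bot_probability": 0.12}]
# storage.write_metadata_to_file()
# storage.write_results_to_file()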
"""
Make folds
"""
import argparse
import copy
import json
import math
import os.path
import sys
from pathlib import Path
sys.path.append('/home/user/challenges/lyft/lyft_repo/src')
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, train_test_split
from configs import DATA_ROOT
from pyquaternion import Quaternion
class Table:
def __init__(self, data):
self.data = data
self.index = {x['token']: x for x in data}
def load_table(name, root=os.path.join(DATA_ROOT, 'train_data')):
with open(os.path.join(root, name), 'rb') as f:
return Table(json.load(f))
def get_scene_samples(scene: Table, sample: Table, if_save = False):
"""Get all samples in scenes dictionary"""
my_scene = scene.data[0]
all_tokens = {}
# loop over scenes
for num in range(180):
my_scene = scene.data[num]
token = my_scene['token']
# get all 126 samples tokens for this scene
sample_token = my_scene["first_sample_token"]
scene_sample_tokens = []
scene_sample_tokens.append(sample_token)
for num in range(125):
next_token = sample.index[sample_token]["next"] # proceed to next sample
#print(next_token)
scene_sample_tokens.append(next_token)
sample.index[next_token]
sample_token = next_token
all_tokens[token] = scene_sample_tokens
#print(all_tokens)
if if_save:
pd.DataFrame.from_dict(all_tokens).to_csv('scenes_samples.csv', index=False)  # to_csv returns None, so no dataframe is kept
return all_tokens
def make_split_by_car(df: pd.DataFrame):
"""
Make train - validation split by cars (hosts)
Args:
df = pd.DataFrame(columns=["host", "scene_name", "date",
"scene_token", "first_sample_token"])
"""
hosts = df["host"].unique()
print(hosts)
# split cars (hosts)
train_hosts, validation_hosts = train_test_split(
hosts, test_size=0.25, random_state=413)
print('train hosts {}, val hosts {}'.format(len(train_hosts), len(validation_hosts)))
validation_df = df[df["host"].isin(validation_hosts)]
vi = validation_df.index
train_df = df[~df.index.isin(vi)]
print(len(train_df), len(validation_df), "train/validation split scene counts")
return train_df, validation_df
def make_split_by_scene(df: pd.DataFrame):
"""
Make train - validation split by scenes
Args:
df = pd.DataFrame(columns=["host", "scene_name", "date",
"scene_token", "first_sample_token"])
"""
scene_tokens = df["scene_token"].values
#print(scene_tokens)
# split scenes
scenes_train, scenes_val = train_test_split(
scene_tokens, test_size=0.25, random_state=413)
print('train scenes {}, val scenes {}'.format(len(scenes_train), len(scenes_val)))
validation_df = df[df["scene_token"].isin(scenes_val)]
vi = validation_df.index
train_df = df[~df.index.isin(vi)]
print(len(train_df), len(validation_df), "train/validation split scene counts")
# sanity checks
host_count_df = validation_df.groupby("host")['scene_token'].count()
print('validation_df host counts {}'.format(host_count_df))
return train_df, validation_df
def split_by_scene_stratify_hosts(df: pd.DataFrame, if_save = False):
"""
Make train - validation split by scenes, stratified by host
Args:
df = pd.DataFrame(columns=["host", "scene_name", "date",
"scene_token", "first_sample_token"])
if_save: boolean flag weather to save the folds dataframes
"""
df['folds'] = 0
scene_tokens = df["scene_token"].values
hosts = df["host"].values
skf = StratifiedKFold(n_splits=4, random_state=413, shuffle=True)
# split scenes stratified by car
for num, (train_index, test_index) in enumerate(skf.split(scene_tokens, hosts)):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_val = scene_tokens[train_index], scene_tokens[test_index]
y_train, y_test = hosts[train_index], hosts[test_index]
# record the fold number for these scenes (written to csv below)
df.iloc[test_index, df.columns.get_loc('folds')] = num
print(df.head(10))
print(df['folds'].unique())
print('train scenes {}, val scenes {}'.format(len(X_train), len(X_val)))
validation_df = df[df["scene_token"].isin(X_val)]
vi = validation_df.index
train_df = df[~df.index.isin(vi)]
# sanity checks
host_count_df = validation_df.groupby("host")['scene_token'].count()
print('validation_df host counts {}'.format(host_count_df))
host_count_df = train_df.groupby("host")['scene_token'].count()
print('train_df host counts {}'.format(host_count_df))
if if_save:
df.to_csv('scenes_folds.csv', index = False)
return df, train_df, validation_df
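# Self-contained illustration of the stratified split used above (toy hosts/scenes, not Lyft data):
_toy = pd.DataFrame({'host': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'b'],
                     'scene_token': ['scene_{}'.format(i) for i in range(8)]})
_skf = StratifiedKFold(n_splits=4, random_state=413, shuffle=True)
for _fold, (_train_idx, _test_idx) in enumerate(_skf.split(_toy['scene_token'], _toy['host'])):
    print(_fold, _test_idx)  # each fold holds out two scenes, one from each host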
def main():
# get scenes and hosts
df = pd.read_csv('host_scenes.csv')
host_count_df = df.groupby("host")['scene_token'].count()
print(host_count_df)
# make folds
# train_df, validation_df = make_split_by_car(df)
# train_df, validation_df = make_split_by_scene(df)
# df, train_df, validation_df = split_by_scene_stratify_hosts(df, True)
# scenes tokens for train and validation
df = pd.read_csv('scenes_folds.csv')
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 13:14:48 2019
@author: RDCRLDDH
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pwlf
from openpyxl import load_workbook
import sys, getopt, ast, os
import warnings
warnings.filterwarnings("ignore")
# suppress divide and invalid warnings
np.seterr(divide='ignore')
np.seterr(invalid='ignore')
def formatter(tmp_data):
tmp_data = tmp_data.drop('A', axis=1)
names = tmp_data.loc[0].tolist()
names[0] = 'Date'
tmp_data.columns = names
tmp_data = tmp_data.iloc[6:]
tmp_data['Date'] = pd.to_datetime(tmp_data['Date'])
tmp_data[names[1:]] = tmp_data[names[1:]].apply(lambda x: x.astype('float'))
return tmp_data
def WY_generator(year):
start = pd.datetime((year-1),9,1)
end = pd.datetime(year, 8,30)
return start, end
def TI_index(tmp_sub,base=32):
tmp_sub = tmp_sub.apply(lambda x: x-base)
return tmp_sub
def r2_calculator(my_pwlf):
# calculate the piecewise R^2 value
R2values = np.zeros(my_pwlf.n_segments)
for i in range(my_pwlf.n_segments):
# segregate the data based on break point locations
xmin = my_pwlf.fit_breaks[i]
xmax = my_pwlf.fit_breaks[i+1]
xtemp = my_pwlf.x_data
ytemp = my_pwlf.y_data
indtemp = np.where(xtemp >= xmin)
xtemp = my_pwlf.x_data[indtemp]
ytemp = my_pwlf.y_data[indtemp]
indtemp = np.where(xtemp <= xmax)
xtemp = xtemp[indtemp]
ytemp = ytemp[indtemp]
# predict for the new data
yhattemp = my_pwlf.predict(xtemp)
# calculate ssr
e = yhattemp - ytemp
ssr = np.dot(e, e)
# calculate sst
ybar = np.ones(ytemp.size) * np.mean(ytemp)
ydiff = ytemp - ybar
sst = np.dot(ydiff, ydiff)
R2values[i] = 1.0 - (ssr/sst)
return R2values
def df_merger(swe_sub, tmp_sub):
swe_col,tmp_col = [],[]
[swe_col.append(tuple([col,'SWE'])) for col in swe_sub.columns]
swe_sub.columns=pd.MultiIndex.from_tuples(swe_col)
[tmp_col.append(tuple([col,'TI'])) for col in tmp_sub.columns]
tmp_sub.columns=pd.MultiIndex.from_tuples(tmp_col)
merge = pd.merge(swe_sub,tmp_sub, left_index=True, right_index=True, how='left')
merge = merge.sort_index(level=0, axis=1)
return merge
def regression_analysis(my_pwlf,col):
r_sqd = r2_calculator(my_pwlf)
breaks = my_pwlf.fit_breaks
breaks = [ breaks[i:i+2] for i in range(len(breaks))][:-1]
tmp = pd.concat([pd.DataFrame(breaks), pd.DataFrame(r_sqd),pd.DataFrame(my_pwlf.slopes),pd.DataFrame(my_pwlf.intercepts)],axis=1)
tmp.columns = pd.MultiIndex.from_tuples([(col,'TI_Start'),(col,'TI_End'),(col,'r_sqd'),(col,'ATIMR'),(col,'Intercept')])
return tmp
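# Minimal sketch (synthetic data, assumed two-segment fit) of the pwlf objects that
# r2_calculator and regression_analysis post-process:
_x_demo = np.linspace(0.0, 10.0, 50)
_y_demo = np.where(_x_demo < 5.0, 2.0 * _x_demo, 10.0 + 0.5 * (_x_demo - 5.0))
_pw_demo = pwlf.PiecewiseLinFit(_x_demo, _y_demo)
_pw_demo.fit(2)  # fit two linear segments
_stats_demo = regression_analysis(_pw_demo, 'demo_site')  # breakpoints, R^2, ATIMR slopes, intercepts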
def gradient_merger(grad_df,col):
grad_col = [(col,'ATI'), (col,'ATIMR')]
grad_df.columns = pd.MultiIndex.from_tuples(grad_col)
return grad_df
def mypercent(x):
return np.percentile(x,75)
def composite_meltrate(grad_out):
#calculate possible composite functions
new_df = grad_out.stack(level=0)
pct_75 = lambda y: np.percentile(y, 75)
pct_75 .__name__ = 'pct_75 '
pct_25 = lambda y: np.percentile(y, 25)
pct_25 .__name__ = 'pct_25'
#calculate possible composite functions
func_list = [pct_25, np.median, np.mean ,pct_75]
new_df = pd.pivot_table(new_df, values='ATIMR',index='ATI',aggfunc=func_list)
new_df.columns = ['Composite_25_pct','Composite_Median','Composite_Mean','Composite_75_pct']
new_df= new_df.drop_duplicates()
return new_df
def export_analysis(stat_out, swe_sub, tmp_sub, comp_mr, out_book,year):
'''
Function to export the data to an Excel workbook
Inputs:
stat_out = pandas dataframe containing the results of the regression analysis
swe_sub = pandas dataframe containing the SWE measurements for a WY
tmp_sub = pandas dataframe containing the TI calculations for a WY
comp_mr = pandas dataframe containing the composite TI vs. ATIMR results
out_book = string representing the name of the output Excel workbook
year = string of the WY analyzed
'''
if os.path.exists(out_book):
pass
else:
writer = pd.ExcelWriter(out_book,engine='xlsxwriter')
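# export_analysis is truncated at this point in the extract; a plausible remainder
# (assumption only) would load the existing workbook when present, write stat_out,
# swe_sub, tmp_sub and comp_mr to year-specific sheets via writer, and save the file.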
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 18:53:16 2021
@author: <NAME>
https://www.kaggle.com/ash316/eda-to-prediction-dietanic
"""
"""
Part1: Exploratory Data Analysis(EDA):
1)Analysis of the features.
2)Finding any relations or trends considering multiple features.
Part2: Feature Engineering and Data Cleaning:
1)Adding any few features.
2)Removing redundant features.
3)Converting features into suitable form for modeling.
Part3: Predictive Modeling
1)Running Basic Algorithms.
2)Cross Validation.
3)Ensembling.
4)Important Features Extraction.
"""
# Part1: Exploratory Data Analysis(EDA)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
data=pd.read_csv('D:\\AI\\Kaggle\\EDA To Prediction(DieTanic)\\train.csv')
#data.info()
#(data.isnull().sum()) # check for missing values (NAN)
# The Age, Cabin and Embarked have null values. I will try to fix them.
f,ax=plt.subplots(1,2,figsize=(18,8))
data['Survived'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=True)
ax[0].set_title('Survived')
ax[0].set_ylabel('')
sns.countplot('Survived',data=data,ax=ax[1])
ax[1].set_title('Survived')
plt.show()
# Categorical Features :: Such as sex: has two sub-cats, male and female
# Ordinal Features :: similar to Categorical Features, but with a relative order between values;
# take for example Height with values TALL, MEDIUM, SHORT
# Continuous Feature :: can take any value between two points, i.e. between min and max, e.g. age
print(data.groupby(['Sex','Survived'])['Survived'].count())
f,ax=plt.subplots(1,2,figsize=(18,8))
data[['Sex','Survived']].groupby(['Sex']).mean().plot.bar(ax=ax[0])
ax[0].set_title('Survived vs Sex')
sns.countplot('Sex',hue='Survived',data=data,ax=ax[1])
ax[1].set_title('Sex: Survived vs Dead')
plt.show()
f,ax=plt.subplots(1,2,figsize=(18,8))
data['Pclass'].value_counts().plot.bar(color=['#CD7F32','#FFDF00','#D3D3D3'],ax=ax[0])
ax[0].set_title('Number Of Passengers By Pclass')
ax[0].set_ylabel('Count')
sns.countplot('Pclass',hue='Survived',data=data,ax=ax[1])
ax[1].set_title('Pclass:Survived vs Dead')
plt.show()
sns.factorplot('Pclass','Survived',hue='Sex',data=data)
plt.show()
print('Oldest Passenger was of:',data['Age'].max(),'Years')
print('Youngest Passenger was of:',data['Age'].min(),'Years')
print('Average Age on the ship:',data['Age'].mean(),'Years')
f,ax=plt.subplots(1,2,figsize=(18,8))
sns.violinplot("Pclass","Age", hue="Survived", data=data,split=True,ax=ax[0])
ax[0].set_title('Pclass and Age vs Survived')
ax[0].set_yticks(range(0,110,10))
sns.violinplot("Sex","Age", hue="Survived", data=data,split=True,ax=ax[1])
ax[1].set_title('Sex and Age vs Survived')
ax[1].set_yticks(range(0,110,10))
plt.show()
data['Initial'] = data.Name.str.extract(r'([A-Za-z]+)\.')  # extract the salutation (Mr, Mrs, Miss, ...) from the name
data['Initial'].replace(['Mlle','Mme','Ms','Dr','Major','Lady','Countess','Jonkheer','Col','Rev','Capt','Sir','Don'],['Miss','Miss','Miss','Mr','Mr','Mrs','Mrs','Other','Other','Other','Mr','Mr','Mr'],inplace=True)
data.groupby('Initial')['Age'].mean() #lets check the average age by Initials
## Assigning the NaN Values with the Ceil values of the mean ages
data.loc[(data.Age.isnull())&(data.Initial=='Mr'),'Age']=33
data.loc[(data.Age.isnull())&(data.Initial=='Mrs'),'Age']=36
data.loc[(data.Age.isnull())&(data.Initial=='Master'),'Age']=5
data.loc[(data.Age.isnull())&(data.Initial=='Miss'),'Age']=22
data.loc[(data.Age.isnull())&(data.Initial=='Other'),'Age']=46
data.Age.isnull().any() #So no null values left finally
f,ax=plt.subplots(1,2,figsize=(20,10))
data[data['Survived']==0].Age.plot.hist(ax=ax[0],bins=20,edgecolor='black',color='red')
ax[0].set_title('Survived= 0')
x1=list(range(0,85,5))
ax[0].set_xticks(x1)
data[data['Survived']==1].Age.plot.hist(ax=ax[1],color='green',bins=20,edgecolor='black')
ax[1].set_title('Survived= 1')
x2=list(range(0,85,5))
ax[1].set_xticks(x2)
plt.show()
sns.factorplot('Pclass','Survived',col='Initial',data=data)
plt.show()
sns.factorplot('Embarked','Survived',data=data)
fig=plt.gcf()
fig.set_size_inches(5,3)
plt.show()
f,ax=plt.subplots(2,2,figsize=(20,15))
sns.countplot('Embarked',data=data,ax=ax[0,0])
ax[0,0].set_title('No. Of Passengers Boarded')
sns.countplot('Embarked',hue='Sex',data=data,ax=ax[0,1])
ax[0,1].set_title('Male-Female Split for Embarked')
sns.countplot('Embarked',hue='Survived',data=data,ax=ax[1,0])
ax[1,0].set_title('Embarked vs Survived')
sns.countplot('Embarked',hue='Pclass',data=data,ax=ax[1,1])
ax[1,1].set_title('Embarked vs Pclass')
plt.subplots_adjust(wspace=0.2,hspace=0.5)
plt.show()
sns.factorplot('Pclass','Survived',hue='Sex',col='Embarked',data=data)
plt.show()
data['Embarked'].fillna('S',inplace=True)
data.Embarked.isnull().any()# Finally No NaN values
pd.crosstab([data.SibSp],data.Survived).style.background_gradient(cmap='summer_r')
f,ax=plt.subplots(1,2,figsize=(20,8))
sns.barplot('SibSp','Survived',data=data,ax=ax[0])
ax[0].set_title('SibSp vs Survived')
sns.factorplot('SibSp','Survived',data=data,ax=ax[1])
ax[1].set_title('SibSp vs Survived')
plt.close(2)
plt.show()
"""
Observations:
The barplot and factorplot show that a passenger alone on board with no siblings has a 34.5% survival rate. The survival rate roughly decreases as the number of siblings increases. This makes sense: with a family on board, a passenger will try to save them instead of saving themselves first. Surprisingly, the survival rate for families with 5-8 members is 0%. Could the reason be Pclass?
The reason is Pclass. The crosstab shows that passengers with SibSp>3 were all in Pclass3. It is evident that all the large families in Pclass3 (>3 siblings) died.
"""
print('Highest Fare was:',data['Fare'].max())
print('Lowest Fare was:',data['Fare'].min())
print('Average Fare was:',data['Fare'].mean())
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(data[data['Pclass']==1].Fare,ax=ax[0])
ax[0].set_title('Fares in Pclass 1')
sns.distplot(data[data['Pclass']==2].Fare,ax=ax[1])
ax[1].set_title('Fares in Pclass 2')
sns.distplot(data[data['Pclass']==3].Fare,ax=ax[2])
ax[2].set_title('Fares in Pclass 3')
plt.show()
sns.heatmap(data.corr(),annot=True,cmap='RdYlGn',linewidths=0.2) #data.corr()-->correlation matrix
fig=plt.gcf()
fig.set_size_inches(10,8)
plt.show()
"""
POSITIVE CORRELATION: If an increase in feature A leads to increase in feature B, then they are positively correlated. A value 1 means perfect positive correlation.
NEGATIVE CORRELATION: If an increase in feature A leads to decrease in feature B, then they are negatively correlated. A value -1 means perfect negative correlation.
"""
"""
Now let's say that two features are highly or perfectly correlated, so an increase in one leads to an increase in the other. This means that both features contain highly similar information and there is very little or no variance in information. This is known as multicollinearity, since both of them carry almost the same information.
Do you think we should use both of them, when one of them is redundant? While building or training models, we should try to eliminate redundant features, as this reduces training time and brings several other advantages.
Now from the above heatmap, we can see that the features are not much correlated. The highest correlation is between SibSp and Parch, i.e. 0.41. So we can carry on with all features.
"""
data['Age_band']=0
data.loc[data['Age']<=16,'Age_band']=0
data.loc[(data['Age']>16)&(data['Age']<=32),'Age_band']=1
data.loc[(data['Age']>32)&(data['Age']<=48),'Age_band']=2
data.loc[(data['Age']>48)&(data['Age']<=64),'Age_band']=3
data.loc[data['Age']>64,'Age_band']=4
data.head(2)
sns.factorplot('Age_band','Survived',data=data,col='Pclass')
plt.show()
data['Family_Size']=0
data['Family_Size']=data['Parch']+data['SibSp']#family size
data['Alone']=0
data.loc[data.Family_Size==0,'Alone']=1#Alone
f,ax=plt.subplots(1,2,figsize=(18,6))
sns.factorplot('Family_Size','Survived',data=data,ax=ax[0])
ax[0].set_title('Family_Size vs Survived')
sns.factorplot('Alone','Survived',data=data,ax=ax[1])
ax[1].set_title('Alone vs Survived')
plt.close(2)
plt.close(3)
plt.show()
data['Fare_Range']=pd.qcut(data['Fare'],4)
data.groupby(['Fare_Range'])['Survived'].mean().to_frame().style.background_gradient(cmap='summer_r')
data['Fare_cat']=0
data.loc[data['Fare']<=7.91,'Fare_cat']=0
data.loc[(data['Fare']>7.91)&(data['Fare']<=14.454),'Fare_cat']=1
data.loc[(data['Fare']>14.454)&(data['Fare']<=31),'Fare_cat']=2
data.loc[(data['Fare']>31)&(data['Fare']<=513),'Fare_cat']=3
sns.factorplot('Fare_cat','Survived',data=data,hue='Sex')
plt.show()
data['Sex'].replace(['male','female'],[0,1],inplace=True)
data['Embarked'].replace(['S','C','Q'],[0,1,2],inplace=True)
data['Initial'].replace(['Mr','Mrs','Miss','Master','Other'],[0,1,2,3,4],inplace=True)
data.drop(['Name','Age','Ticket','Fare','Cabin','Fare_Range','PassengerId'],axis=1,inplace=True)
sns.heatmap(data.corr(),annot=True,cmap='RdYlGn',linewidths=0.2,annot_kws={'size':20})
fig=plt.gcf()
fig.set_size_inches(18,15)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Part3: Predictive Modeling
We have gained some insights from the EDA part. But with that alone, we cannot accurately predict whether a passenger will survive or die. So now we will predict whether the passenger will survive or not using some great classification algorithms. Following are the algorithms I will use to make the model:
1)Logistic Regression
2)Support Vector Machines(Linear and radial)
3)Random Forest
4)K-Nearest Neighbours
5)Naive Bayes
6)Decision Tree
7)Logistic Regression
"""
#importing all the required ML packages
from sklearn.linear_model import LogisticRegression #logistic regression
from sklearn import svm #support vector Machine
from sklearn.ensemble import RandomForestClassifier #Random Forest
from sklearn.neighbors import KNeighborsClassifier #KNN
from sklearn.naive_bayes import GaussianNB #Naive bayes
from sklearn.tree import DecisionTreeClassifier #Decision Tree
from sklearn.model_selection import train_test_split #training and testing data split
from sklearn import metrics #accuracy measure
from sklearn.metrics import confusion_matrix #for confusion matrix
train,test=train_test_split(data,test_size=0.3,random_state=0,stratify=data['Survived'])
train_X=train[train.columns[1:]]
train_Y=train[train.columns[:1]]
test_X=test[test.columns[1:]]
test_Y=test[test.columns[:1]]
X=data[data.columns[1:]]
Y=data['Survived']
# Radial Support Vector Machines(rbf-SVM)
model=svm.SVC(kernel='rbf',C=1,gamma=0.1)
model.fit(train_X,train_Y)
prediction1=model.predict(test_X)
print('Accuracy for rbf SVM is ',metrics.accuracy_score(prediction1,test_Y))
# Linear Support Vector Machine(linear-SVM)
model=svm.SVC(kernel='linear',C=0.1,gamma=0.1)
model.fit(train_X,train_Y)
prediction2=model.predict(test_X)
print('Accuracy for linear SVM is',metrics.accuracy_score(prediction2,test_Y))
# Logistic Regression
model = LogisticRegression()
model.fit(train_X,train_Y)
prediction3=model.predict(test_X)
print('The accuracy of the Logistic Regression is',metrics.accuracy_score(prediction3,test_Y))
# Decision Tree
model=DecisionTreeClassifier()
model.fit(train_X,train_Y)
prediction4=model.predict(test_X)
print('The accuracy of the Decision Tree is',metrics.accuracy_score(prediction4,test_Y))
# K-Nearest Neighbours(KNN)
model=KNeighborsClassifier()
model.fit(train_X,train_Y)
prediction5=model.predict(test_X)
print('The accuracy of the KNN is',metrics.accuracy_score(prediction5,test_Y))
# Now the accuracy for the KNN model changes as we change the values for n_neighbours attribute. The default value is 5. Lets check the accuracies over various values of n_neighbours.
a_index=list(range(1,11))
a=pd.Series()
x=[0,1,2,3,4,5,6,7,8,9,10]
for i in list(range(1,11)):
model=KNeighborsClassifier(n_neighbors=i)
model.fit(train_X,train_Y)
prediction=model.predict(test_X)
a=a.append(pd.Series(metrics.accuracy_score(prediction,test_Y)))
plt.plot(a_index, a)
plt.xticks(x)
fig=plt.gcf()
fig.set_size_inches(12,6)
plt.show()
print('Accuracies for different values of n are:',a.values,'with the max value as ',a.values.max())
# Gaussian Naive Bayes
model=GaussianNB()
model.fit(train_X,train_Y)
prediction6=model.predict(test_X)
print('The accuracy of the NaiveBayes is',metrics.accuracy_score(prediction6,test_Y))
# Random Forests
model=RandomForestClassifier(n_estimators=100)
model.fit(train_X,train_Y)
prediction7=model.predict(test_X)
print('The accuracy of the Random Forests is',metrics.accuracy_score(prediction7,test_Y))
"""
The accuracy of a model is not the only factor that determines the robustness of the classifier. Let's say that a classifier is trained on training data, tested on test data, and scores an accuracy of 90%.
Now this seems to be very good accuracy for a classifier, but can we confirm that it will be 90% for every new test set that comes along? The answer is no, because we can't determine which instances the classifier will use to train itself. As the training and testing data change, the accuracy will also change. It may increase or decrease. This is known as model variance.
To overcome this and get a generalized model, we use CROSS VALIDATION.
"""
# CROSS VALIDATION used to OVERCOME MODEL VARIANCE
"""
Cross Validation
Many times the data is imbalanced, i.e. there may be a high number of class1 instances but fewer instances of other classes. Thus we should train and test our algorithm on each and every instance of the dataset. Then we can take an average of all the accuracies noted over the dataset.
1)The K-Fold Cross Validation works by first dividing the dataset into k subsets.
2)Let's say we divide the dataset into (k=5) parts. We reserve 1 part for testing and train the algorithm on the other 4 parts.
3)We continue the process by changing the testing part in each iteration and training the algorithm on the other parts. The accuracies and errors are then averaged to get an average accuracy of the algorithm.
This is called K-Fold Cross Validation.
4)An algorithm may underfit a dataset for some training data and may also overfit the data for another training set. Thus with cross-validation, we can achieve a generalised model.
"""
from sklearn.model_selection import KFold #for K-fold cross validation
from sklearn.model_selection import cross_val_score #score evaluation
from sklearn.model_selection import cross_val_predict #prediction
kfold = KFold(n_splits=10, random_state=22) # k=10, split the data into 10 equal parts
xyz=[]
accuracy=[]
std=[]
classifiers=['Linear Svm','Radial Svm','Logistic Regression','KNN','Decision Tree','Naive Bayes','Random Forest']
models=[svm.SVC(kernel='linear'),svm.SVC(kernel='rbf'),LogisticRegression(),KNeighborsClassifier(n_neighbors=9),DecisionTreeClassifier(),GaussianNB(),RandomForestClassifier(n_estimators=100)]
for i in models:
model = i
cv_result = cross_val_score(model,X,Y, cv = kfold,scoring = "accuracy")
cv_result=cv_result
xyz.append(cv_result.mean())
std.append(cv_result.std())
accuracy.append(cv_result)
new_models_dataframe2=pd.DataFrame({'CV Mean':xyz,'Std':std},index=classifiers)
new_models_dataframe2
plt.subplots(figsize=(12,6))
box=pd.DataFrame(accuracy,index=[classifiers])
box.T.boxplot()
new_models_dataframe2['CV Mean'].plot.barh(width=0.8)
plt.title('Average CV Mean Accuracy')
fig=plt.gcf()
fig.set_size_inches(8,5)
plt.show()
f,ax=plt.subplots(3,3,figsize=(12,10))
y_pred = cross_val_predict(svm.SVC(kernel='rbf'),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[0,0],annot=True,fmt='2.0f')
ax[0,0].set_title('Matrix for rbf-SVM')
y_pred = cross_val_predict(svm.SVC(kernel='linear'),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[0,1],annot=True,fmt='2.0f')
ax[0,1].set_title('Matrix for Linear-SVM')
y_pred = cross_val_predict(KNeighborsClassifier(n_neighbors=9),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[0,2],annot=True,fmt='2.0f')
ax[0,2].set_title('Matrix for KNN')
y_pred = cross_val_predict(RandomForestClassifier(n_estimators=100),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[1,0],annot=True,fmt='2.0f')
ax[1,0].set_title('Matrix for Random-Forests')
y_pred = cross_val_predict(LogisticRegression(),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[1,1],annot=True,fmt='2.0f')
ax[1,1].set_title('Matrix for Logistic Regression')
y_pred = cross_val_predict(DecisionTreeClassifier(),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[1,2],annot=True,fmt='2.0f')
ax[1,2].set_title('Matrix for Decision Tree')
y_pred = cross_val_predict(GaussianNB(),X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[2,0],annot=True,fmt='2.0f')
ax[2,0].set_title('Matrix for Naive Bayes')
plt.subplots_adjust(hspace=0.2,wspace=0.2)
plt.show()
from sklearn.model_selection import GridSearchCV
C=[0.05,0.1,0.2,0.3,0.25,0.4,0.5,0.6,0.7,0.8,0.9,1]
gamma=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
kernel=['rbf','linear']
hyper={'kernel':kernel,'C':C,'gamma':gamma}
gd=GridSearchCV(estimator=svm.SVC(),param_grid=hyper,verbose=True)
gd.fit(X,Y)
print(gd.best_score_)
print(gd.best_estimator_)
n_estimators=range(100,1000,100)
hyper={'n_estimators':n_estimators}
gd=GridSearchCV(estimator=RandomForestClassifier(random_state=0),param_grid=hyper,verbose=True)
gd.fit(X,Y)
print(gd.best_score_)
print(gd.best_estimator_)
from sklearn.ensemble import VotingClassifier
ensemble_lin_rbf=VotingClassifier(estimators=[('KNN',KNeighborsClassifier(n_neighbors=10)),
('RBF',svm.SVC(probability=True,kernel='rbf',C=0.5,gamma=0.1)),
('RFor',RandomForestClassifier(n_estimators=500,random_state=0)),
('LR',LogisticRegression(C=0.05)),
('DT',DecisionTreeClassifier(random_state=0)),
('NB',GaussianNB()),
('svm',svm.SVC(kernel='linear',probability=True))
],
voting='soft').fit(train_X,train_Y)
print('The accuracy for ensembled model is:',ensemble_lin_rbf.score(test_X,test_Y))
cross=cross_val_score(ensemble_lin_rbf,X,Y, cv = 10,scoring = "accuracy")
print('The cross validated score is',cross.mean())
from sklearn.ensemble import BaggingClassifier
model=BaggingClassifier(base_estimator=KNeighborsClassifier(n_neighbors=3),random_state=0,n_estimators=700)
model.fit(train_X,train_Y)
prediction=model.predict(test_X)
print('The accuracy for bagged KNN is:',metrics.accuracy_score(prediction,test_Y))
result=cross_val_score(model,X,Y,cv=10,scoring='accuracy')
print('The cross validated score for bagged KNN is:',result.mean())
model=BaggingClassifier(base_estimator=DecisionTreeClassifier(),random_state=0,n_estimators=100)
model.fit(train_X,train_Y)
prediction=model.predict(test_X)
print('The accuracy for bagged Decision Tree is:',metrics.accuracy_score(prediction,test_Y))
result=cross_val_score(model,X,Y,cv=10,scoring='accuracy')
print('The cross validated score for bagged Decision Tree is:',result.mean())
from sklearn.ensemble import AdaBoostClassifier
ada=AdaBoostClassifier(n_estimators=200,random_state=0,learning_rate=0.1)
result=cross_val_score(ada,X,Y,cv=10,scoring='accuracy')
print('The cross validated score for AdaBoost is:',result.mean())
from sklearn.ensemble import GradientBoostingClassifier
grad=GradientBoostingClassifier(n_estimators=500,random_state=0,learning_rate=0.1)
result=cross_val_score(grad,X,Y,cv=10,scoring='accuracy')
print('The cross validated score for Gradient Boosting is:',result.mean())
import xgboost as xg
xgboost=xg.XGBClassifier(n_estimators=900,learning_rate=0.1)
result=cross_val_score(xgboost,X,Y,cv=10,scoring='accuracy')
print('The cross validated score for XGBoost is:',result.mean())
n_estimators=list(range(100,1100,100))
learn_rate=[0.05,0.1,0.2,0.3,0.25,0.4,0.5,0.6,0.7,0.8,0.9,1]
hyper={'n_estimators':n_estimators,'learning_rate':learn_rate}
gd=GridSearchCV(estimator=AdaBoostClassifier(),param_grid=hyper,verbose=True)
gd.fit(X,Y)
print(gd.best_score_)
print(gd.best_estimator_)
ada=AdaBoostClassifier(n_estimators=200,random_state=0,learning_rate=0.05)
result=cross_val_predict(ada,X,Y,cv=10)
sns.heatmap(confusion_matrix(Y,result),cmap='winter',annot=True,fmt='2.0f')
plt.show()
f,ax=plt.subplots(2,2,figsize=(15,12))
model=RandomForestClassifier(n_estimators=500,random_state=0)
model.fit(X,Y)
pd.Series(model.feature_importances_,X.columns)
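# The source notebook ends mid-statement here; a plausible continuation (assumption,
# not shown in this extract) plots the Random Forest feature importances, e.g.:
# pd.Series(model.feature_importances_, X.columns).sort_values(ascending=True).plot.barh(width=0.8, ax=ax[0, 0])
# ax[0, 0].set_title('Feature Importance in Random Forests')
# plt.show()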