# coding: utf-8
# # Convert downloaded TCGA datasets into sample × gene matrices
#
# This notebook is updated to include the data from the [TCGA PanCanAtlas April 2018 updates](http://www.cell.com/pb-assets/consortium/pancanceratlas/pancan/index.html).
# In[1]:
import collections
import os
import pandas
# ## Read gene information
# In[2]:
# Load genes
path = os.path.join('download', 'genes', 'genes.tsv')
gene_df = (pandas.read_table(path, dtype='str')
.set_index('entrez_gene_id', drop=False)
[['entrez_gene_id', 'symbol', 'description', 'gene_type']]
)
gene_df.head(2)
# In[3]:
# Load gene updater
path = os.path.join('download', 'genes', 'updater.tsv')
updater_df = pandas.read_table(path)
old_to_new_entrez = dict(zip(updater_df.old_entrez_gene_id,
updater_df.new_entrez_gene_id))
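# In[ ]:
# Added illustration, not part of the original notebook: a minimal sketch of applying
# the updater mapping to a toy set of gene identifiers. `toy_ids` is a made-up
# placeholder drawn from the mapping's own keys; real usage would target the gene
# identifiers of a downloaded dataset, and IDs absent from the mapping stay unchanged.
toy_ids = pandas.Series(list(old_to_new_entrez)[:3])
toy_ids.map(old_to_new_entrez).fillna(toy_ids)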
# In[4]:
# Load chromosome-symbol to entrez_gene_id mapping
path = os.path.join('download', 'genes', 'chromosome-symbol-mapper.tsv')
chr_sym_map_df = pandas.read_table(path)
chr_sym_map_df.chromosome = 'chr' + chr_sym_map_df.chromosome
chr_sym_map_df.head(2)
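# In[ ]:
# Added illustration, not part of the original notebook: one plausible use of the
# chromosome-symbol mapper is a (chromosome, symbol) -> entrez_gene_id lookup.
# The `symbol` and `entrez_gene_id` column names are assumed from the file's naming
# convention and may need adjusting to the actual TSV header.
chr_sym_to_entrez = dict(zip(
    zip(chr_sym_map_df.chromosome, chr_sym_map_df.symbol),
    chr_sym_map_df.entrez_gene_id,
))
len(chr_sym_to_entrez)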
# ## Read sample information
#
# This file contains sample information. See the [online documentation](https://xenabrowser.net/datapages/?dataset=EB%2B%2BAdjustPANCAN_IlluminaHiSeq_RNASeqV2.geneExp.xena&host=https%3A%2F%2Fpancanatlas.xenahubs.net) for more information; for details on the curation, refer to [Liu et al. 2018](https://doi.org/10.1016/j.cell.2018.02.052 "An Integrated TCGA Pan-Cancer Clinical Data Resource to Drive High-Quality Survival Outcome Analytics").
# In[5]:
path = os.path.join('mapping', 'diseases.tsv')
disease_df = pandas.read_table(path)
disease_df.head(2)
# In[6]:
# Data from https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes
path = os.path.join('mapping', 'tcga_sampletype_codes.csv')
sampletype_codes_df = pandas.read_csv(path, dtype='str')
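# In[ ]:
# Added illustration, not part of the original notebook: TCGA barcodes encode the
# sample type in characters 14-15 (e.g. '01' for Primary Solid Tumor), which is the
# code that the table read above translates into a human-readable label.
# `example_barcode` is a made-up example barcode.
example_barcode = 'TCGA-02-0001-01C-01D-0182-01'
example_barcode[13:15]  # -> '01'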
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
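# Illustrative sketch (not part of the original test module): the basic pattern behind the where
# tests above -- replacement values that do not fit the original dtype upcast the result.
def _example_where_coercion():
    s = pd.Series([1, 2, 3, 4])
    cond = pd.Series([True, False, True, False])
    return s.where(cond, 1.1).dtype  # float64, because 1.1 does not fit int64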
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
# not indexing, but placed here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => datetime64tz
# ToDo: must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + object => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
'x',
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_timedelta64(self):
pass
def test_fillna_series_period(self):
pass
def test_fillna_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01',
'2011-01-03', '2011-01-04'])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Index([pd.Timestamp('2011-01-01')
import pyvinecopulib as pv
import numpy as np
import pandas as pd
from experiments_utils import random_bicop, get_pvcopfamily, beta_copula_cdf, emp_cdf, gaussian_mixture_copula
from models.igc import ImplicitGenerativeCopula
from datetime import datetime
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
### set options ###
N_TRAIN = 1000
N_TEST = 5000
N_ROUNDS = 25
FAMILIES = ["student", "clayton", "gumbel", "gaussian_mixture"]
L2_SCORES={}
### run experiments ###
LOG={}
for fam in FAMILIES:
print(fam)
l2=[]
logs_fam = {"true": [], "param": [], "param_select": []}
for i in range(N_ROUNDS):
cdf_vals={}
if fam != "gaussian_mixture":
# true copula
params = random_bicop(fam)
logs_fam["true"].append(params)
params["family"] = get_pvcopfamily(fam)
copula = pv.Bicop(**params)
u_train = copula.simulate(N_TRAIN)
U_TEST = copula.simulate(N_TEST)
cdf_vals_true = np.reshape(copula.cdf(U_TEST), (-1,1))
else:
u_train, U_TEST, cdf_vals_true, params = gaussian_mixture_copula(n_train=N_TRAIN, n_test=N_TEST, n_sim=200000)
cdf_vals_true = np.reshape(cdf_vals_true, (-1,1))
logs_fam["true"].append(params)
if fam != "gaussian_mixture":
# fit parametric copula
controls_param = pv.FitControlsBicop(family_set=[get_pvcopfamily(fam)], selection_criterion="bic")
cop_param = pv.Bicop()
cop_param.select(u_train, controls=controls_param)
logs_fam["param"].append(cop_param.str())
cdf_vals["param"] = cop_param.cdf(U_TEST)
# fit parametric copula without knowing the correct family
controls_param_select = pv.FitControlsBicop(family_set=[pv.BicopFamily.indep,
pv.BicopFamily.gaussian,
pv.BicopFamily.student,
pv.BicopFamily.clayton,
pv.BicopFamily.gumbel,
pv.BicopFamily.frank,
pv.BicopFamily.joe,
pv.BicopFamily.bb1,
pv.BicopFamily.bb6,
pv.BicopFamily.bb7,
pv.BicopFamily.bb8],
selection_criterion="bic")
cop_param_select = pv.Bicop()
cop_param_select.select(u_train, controls=controls_param_select)
logs_fam["param_select"].append(cop_param_select.str())
cdf_vals["param_select"] = cop_param_select.cdf(U_TEST)
# fit non-parametric beta copula
cdf_vals["beta"] = beta_copula_cdf(u_train=u_train, u_test=U_TEST)
# fit non-parametric copula
controls_tll1 = pv.FitControlsBicop(nonparametric_method="linear", family_set=[pv.BicopFamily.tll])
cop_tll1 = pv.Bicop()
cop_tll1.select(u_train, controls=controls_tll1)
cdf_vals["tll1"] = emp_cdf(U_TEST, cop_tll1.simulate(200000))
# fit non-parametric copula
controls_tll2 = pv.FitControlsBicop(nonparametric_method="quadratic", family_set=[pv.BicopFamily.tll])
cop_tll2 = pv.Bicop()
cop_tll2.select(u_train, controls=controls_tll2)
cdf_vals["tll2"] = emp_cdf(U_TEST, cop_tll2.simulate(200000))
# # fit IGC copula
cop_igc = ImplicitGenerativeCopula(dim_latent=6, dim_out=2, n_samples_train=200, n_layers=2, n_neurons=100)
hist=cop_igc.fit(u_train, batch_size=100, epochs=500)
cdf_vals["igc"] = cop_igc.cdf(v=U_TEST, n=200000)
errors = cdf_vals_true-pd.DataFrame(cdf_vals)
l2.append(errors.pow(2).sum(axis=0))
L2_SCORES[fam] = pd.DataFrame(l2)
"""
Unit and regression test for the kissim.comparison.FeatureDistances class.
"""
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from kissim.comparison import FeatureDistances
PATH_TEST_DATA = Path(__name__).parent / "kissim" / "tests" / "data"
class TestsFeatureDistances:
"""
Test FeatureDistances class methods.
"""
@pytest.mark.parametrize(
"feature_pair, distance_measure, distance",
[
(np.array([[4, 0], [0, 3]]), "scaled_euclidean", 2.5),
(np.array([]), "scaled_euclidean", np.nan),
],
)
def test_calculate_feature_distance(self, feature_pair, distance_measure, distance):
"""
Test distance calculation for two value (feature) lists.
Parameters
----------
feature_pair : np.ndarray
Pairwise bits of one feature extracted from two fingerprints (only bit positions
without any NaN value).
distance_measure : str
Type of distance measure, defaults to Euclidean distance.
distance : float
Distance between two value lists.
"""
feature_distances = FeatureDistances()
distance_calculated = feature_distances._calculate_feature_distance(
feature_pair, distance_measure
)
if np.isnan(distance):
assert np.isnan(distance_calculated)
else:
assert np.isclose(distance_calculated, distance, rtol=1e-04)
@pytest.mark.parametrize(
"feature_pair, distance_measure",
[("feature_pair", "scaled_euclidean")], # Feature pair is not np.ndarray
)
def test_calculate_feature_distance_typeerror(self, feature_pair, distance_measure):
"""
Test TypeError exceptions in distance calculation for two value (feature) lists.
Parameters
----------
feature_pair : np.ndarray
Pairwise bits of one feature extracted from two fingerprints (only bit positions
without any NaN value).
distance_measure : str
Type of distance measure, defaults to Euclidean distance.
"""
with pytest.raises(TypeError):
feature_distance = FeatureDistances()
feature_distance._calculate_feature_distance(feature_pair, distance_measure)
@pytest.mark.parametrize(
"feature_pair, distance_measure",
[
(np.array([[1, 2], [1, 2]]), "xxx"), # Distance measure is not implemented
(
np.array([[1, 2], [1, 2], [1, 2]]),
"scaled_euclidean",
), # Feature pair has more than two rows
(np.array([[1, 2], [1, 2]]), 11), # Distance measure is not str
],
)
def test_calculate_feature_distance_valueerror(self, feature_pair, distance_measure):
"""
Test ValueError exceptions in distance calculation for two value (feature) lists.
Parameters
----------
feature_pair : np.ndarray
Pairwise bits of one feature extracted from two fingerprints (only bit positions
without any NaN value).
distance_measure : str
Type of distance measure, defaults to Euclidean distance.
"""
with pytest.raises(ValueError):
feature_distance = FeatureDistances()
feature_distance._calculate_feature_distance(feature_pair, distance_measure)
@pytest.mark.parametrize(
"feature1, feature2, distance, bit_coverage",
[
(pd.Series([1, 1, 1, 1]), pd.Series([0, 0, 0, 0]), 0.5, 1.0),
(pd.Series([1, 1, 1, 1, np.nan]), pd.Series([0, 0, 0, 0, 0]), 0.5, 0.8),
(pd.Series([1, 1, 1, 1, 1]), pd.Series([0, 0, 0, 0, np.nan]), 0.5, 0.8),
(pd.Series([1, 1, 1, 1, np.nan])
#!/usr/bin/env python3
import os
import re
import cv2
import keras
import numpy as np
import pandas as pd
DATA_PATH = 'cage/images/'
LEFT_PATH = 'data/left.h5'
RIGHT_PATH = 'data/right.h5'
NUM_PATH = 'data/numbers.csv'
DataSet = (np.ndarray, np.ndarray, np.ndarray)
def extract() -> (list, list):
l_data, r_data = [], []
pattern = r'(\d)(\d)_\w.*\.png'
for _, _, files in os.walk(DATA_PATH, topdown=False):
for file in files:
if 'png' not in file:
continue
match = re.match(pattern, file)
if not match or len(match.groups()) != 2:
continue
l_num, r_num = match.groups()
file = DATA_PATH + file
img: np.ndarray = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, width = img.shape
l_part: np.ndarray = img[..., :int(width / 2)]
r_part: np.ndarray = img[..., int(width / 2):]
l_data.append({'image': l_part, 'number': l_num})
r_data.append({'image': r_part, 'number': r_num})
return l_data, r_data
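# Illustrative sketch (not part of the original script): the assumed filename convention is
# "<left digit><right digit>_<suffix>.png", so a (hypothetical) file "37_sample.png" labels the
# left image half as 3 and the right half as 7. This only exercises the regex used in extract().
def _example_filename_parsing() -> None:
    match = re.match(r'(\d)(\d)_\w.*\.png', '37_sample.png')
    if match:
        l_num, r_num = match.groups()
        print(l_num, r_num)  # -> 3 7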
def load() -> (DataSet, DataSet):
l_full, r_full = extract()
np.random.shuffle(l_full)
np.random.shuffle(r_full)
xl_full, yl_full = np.array([x['image'] for x in l_full]), np.array([int(x['number']) for x in l_full])
xr_full, yr_full = np.array([x['image'] for x in r_full]), np.array([int(x['number']) for x in r_full])
xl_train, yl_train = xl_full[:int(len(xl_full) / 3)], yl_full[:int(len(yl_full) / 3)]
xr_train, yr_train = xr_full[:int(len(xr_full) / 3)], yr_full[:int(len(yr_full) / 3)]
xl_valid, yl_valid = \
xl_full[int(len(xl_full) / 3):int(len(xl_full) / 3 * 2)], \
yl_full[int(len(yl_full) / 3):int(len(yl_full) / 3 * 2)]
xr_valid, yr_valid = \
xr_full[int(len(xr_full) / 3):int(len(xr_full) / 3 * 2)], \
yr_full[int(len(yr_full) / 3):int(len(yr_full) / 3 * 2)]
xl_test, yl_test = \
xl_full[int(len(xl_full) / 3 * 2):], \
yl_full[int(len(yl_full) / 3 * 2):]
xr_test, yr_test = \
xr_full[int(len(xr_full) / 3 * 2):], \
yr_full[int(len(yr_full) / 3 * 2):]
xl_mean, xr_mean = \
xl_train.mean(axis=0, keepdims=True), \
xr_train.mean(axis=0, keepdims=True)
xl_std, xr_std = \
xl_train.std(axis=0, keepdims=True) + 1e-7, \
xr_train.std(axis=0, keepdims=True) + 1e-7
xl_train, xr_train = (xl_train - xl_mean) / xl_std, (xr_train - xr_mean) / xr_std
xl_valid, xr_valid = (xl_valid - xl_mean) / xl_std, (xr_valid - xr_mean) / xr_std
xl_test, xr_test = (xl_test - xl_mean) / xl_std, (xr_test - xr_mean) / xr_std
xl_train, xr_train = xl_train[..., np.newaxis], xr_train[..., np.newaxis]
xl_valid, xr_valid = xl_valid[..., np.newaxis], xr_valid[..., np.newaxis]
xl_test, xr_test = xl_test[..., np.newaxis], xr_test[..., np.newaxis]
if not os.path.exists(NUM_PATH):
nums = {
'l_mean': xl_mean.tolist(),
'l_std': xl_std.tolist(),
'r_mean': xr_mean.tolist(),
'r_std': xr_std.tolist(),
}
with open(NUM_PATH, 'xt', encoding='utf-8', newline='\n') as f:
pd.DataFrame([nums])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
import json
import pandas as pd
from tqdm import tqdm
def read_var(file='parameters.xlsx', scenario='base'):
parameter_frame = pd.read_excel(file)
import pandas as pd
def load_data(portfolio_data_absolute_path="/home/chris/Dropbox/Finance/data/portfolio_trades.ods",
stock_data_absolute_path="/home/chris/Dropbox/Finance/data/stock_trades.ods",
income_data_absolute_path="/home/chris/Dropbox/Finance/data/income.ods",
etf_master_data_absolute_path="/home/chris/Dropbox/Finance/data/generated/master_data_stocks.ods",
stock_price_data_absolute_path="/home/chris/Dropbox/Finance/data/generated/stock_prices.ods",
cashflow_path = "/home/chris/Dropbox/Finance/data/data_cashflow/bilanz_full.csv",
crypto_path = "/home/chris/Dropbox/Finance/data/crypto/crypto_trades_manual.ods",
include_speculation=False):
"""
Needs odfpy library to load .ods files!
Loads all necessary data sources of the given portfolio: ETF savings portfolio data, speculation data
(stocks, cryptos, etc).
:param order_data__absolute_path: path to source data for ETF portfolio (filetype: .ods)
:param etf_master_data_absolute_path: path to master data of ETFs (filetype: .ods)
:param stock_price_data_absolute_path: path to price data of ETFs (filetype: .ods)
:param include_speculation: Whether orders of speculation portfolio should be included in output
:param cashflow_path: csv file of cashflow data
:return: tupel of pd.DataFrames with portfolio transactions and master data
"""
orders_portfolio = pd.read_excel(portfolio_data_absolute_path, engine="odf", sheet_name="Buys")
dividends_portfolio = pd.read_excel(portfolio_data_absolute_path, engine="odf", sheet_name="Dividends")
orders_speculation = pd.read_excel(stock_data_absolute_path, engine="odf", sheet_name="Buys")
income = pd.read_excel(income_data_absolute_path, engine="odf")
stock_prices = pd.read_csv(stock_price_data_absolute_path)
etf_master = pd.read_csv(etf_master_data_absolute_path)
cashflow_init = pd.read_csv(cashflow_path)
df_crypto_deposits = pd.read_excel(crypto_path, engine="odf", sheet_name="Deposits", skiprows=2, usecols="A:G")
df_crypto_trades = pd.read_excel(crypto_path, engine="odf", sheet_name="Trades", skiprows=1)
if include_speculation == True:
return ((etf_master, orders_portfolio, dividends_portfolio, income, stock_prices, cashflow_init,
orders_speculation, df_crypto_deposits, df_crypto_trades))
else:
return ((etf_master, orders_portfolio, dividends_portfolio, income, stock_prices, cashflow_init,
None, df_crypto_deposits, df_crypto_trades))
def cleaning_cashflow(df_input: pd.DataFrame) -> pd.DataFrame:
"""
Data cleaning and preprocessing of cashflow data.
:param df_input: Multiple toshl monthly-exports appended into a single dataframe
:return: preprocessed dataframe
"""
import numpy as np
assert df_input.drop("Description",
axis=1).isna().sum().sum() == 0, \
f"There are NaN values in inputfile: {path_data}{filename_cashflow}"
### Data cleaning
df_init = df_input.copy()
df_init['Date'] = pd.to_datetime(df_init['Date'], format='%m/%d/%y')
df_init.drop(columns=['Account', 'Currency', 'Main currency', 'Description'], inplace=True)
df_init['Expense amount'] = df_init['Expense amount'].str.replace(',', '')
df_init['Income amount'] = df_init['Income amount'].str.replace(',', '').astype(np.float64)
df_init['In main currency'] = df_init['In main currency'].str.replace(',', '')
df_init['Expense amount'] = df_init['Expense amount'].astype(np.float64)
df_init['In main currency'] = df_init['In main currency'].astype(np.float64)
### Preprocessing of cashflow amounts
df_init['Amount'] = pd.Series([-y if x > 0. else y
for x, y in zip(df_init['Expense amount'],
df_init['In main currency']
)
]
)
assert df_init[(~df_init["Income amount"].isin(["0.0", "0"])) &
(df_init["In main currency"] != df_init["Amount"])
].count().sum() == 0, "Income amount does not match with main currency amount!"
assert df_init[(~df_init["Expense amount"].isin(["0.0", "0"])) &
(-df_init["In main currency"] != df_init["Amount"])
].count().sum() == 0, "Expense amount does not match with main currency amount!"
### Remap all tags with category "Urlaub" to "old-tag, Urlaub" and map afterwards all double-tags
### containing "Urlaub" to the Urlaub tag
df_init.loc[df_init["Category"] == "Urlaub", "Tags"] = df_init["Tags"].apply(lambda tag: tag + ", Urlaub")
df_init["split_tags"] = df_init["Tags"].apply(lambda x: x.split(","))
assert df_init[df_init["split_tags"].apply(len) > 1]["split_tags"].apply(lambda x: \
"Urlaub" in [s.strip() for s in x]
).all() == True,\
'Some entries with multiple tags do not contain "Urlaub"! Mapping not possible!'
df_init.loc[df_init["split_tags"].apply(len) > 1, "Tags"] = "Urlaub"
df_init = df_init[["Date", "Category", "Tags", "Amount"]]
return(df_init)
def split_cashflow_data(df_cleaned: pd.DataFrame) -> pd.DataFrame:
"""
Splits whole cashflow data into incomes and expenses and groups it monthly and sums amounts per tag
:param df_cleaned: Cleaned dataframe of cashflow
:return: Tuple of dataframes holding incomes and expenses, each grouped by month
"""
needed_columns = ["Tags", "Date", "Amount"]
assert set(needed_columns).intersection(set(df_cleaned.columns)) == set(needed_columns), \
"Columns missing! Need: {0}, Have: {1}".format(needed_columns, list(df_cleaned.columns))
df_grouped = df_cleaned.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
incomes = df_grouped[df_grouped["Amount"] > 0.].copy()
expenses = df_grouped[df_grouped["Amount"] <= 0.].copy()
return((incomes, expenses))
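# Illustrative sketch (not part of the original module): minimal, hypothetical input for
# split_cashflow_data(); the call groups the rows per month and tag and splits them into
# incomes (positive amounts) and expenses (non-positive amounts).
def _example_split_cashflow():
    df_demo = pd.DataFrame({
        "Date": pd.to_datetime(["2021-01-05", "2021-01-20", "2021-02-03"]),
        "Tags": ["Salary", "groceries", "rent"],
        "Amount": [2000.0, -55.3, -700.0],
    })
    incomes_demo, expenses_demo = split_cashflow_data(df_demo)
    return incomes_demo, expenses_demo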
def preprocess_cashflow(df: pd.DataFrame) -> pd.DataFrame:
"""
Remap tags of input data to custom categories, and change the format of the dataframe in order to
easily do computations and plots of the cashflow data.
:param df: Dataframe, holding either incomes or expenses (cleaned) and grouped by month (tags as rows)
:return: dataframe, where each row consists of cashflow data of of a month, each column represents a
custom category
"""
assert isinstance(df.index, pd.core.indexes.multi.MultiIndex) and \
set(df.index.names) == set(["Date", "Tags"]) and \
list(df.columns) == ["Amount"], "Dataframe is not grouped by month!"
### Define custom categories for all tags of Toshl: Make sure category names differ from tag-names,
### otherwise column is dropped and aggregate is wrong
category_dict = {
"home": ['rent', 'insurance', 'Miete'],
"food_healthy": ['restaurants', 'Lebensmittel', 'groceries', 'Restaurants', 'Restaurant Mittag'],
"food_unhealthy": ['Fast Food', 'Süßigkeiten'],
"alcoholic_drinks": ['alcohol', 'Alkohol'],
"non-alcoholic_drinks": ['Kaffee und Tee', 'Erfrischungsgetränke', 'coffee & tea', 'soft drinks'],
"travel_vacation": ['sightseeing', 'Sightseeing', 'Beherbergung', 'accommodation', 'Urlaub'],
"transportation": ['bus', 'Bus', 'taxi', 'Taxi', 'metro', 'Metro', 'Eisenbahn', 'train', 'car',
'Auto', 'parking', 'airplane', 'fuel', 'Flugzeug'],
"sports": ['training', 'Training', 'MoTu', 'Turnier', 'sport equipment', 'Billard', 'Konsum Training'],
"events_leisure_books_abos": ['events', 'Events', 'adult fun', 'Spaß für Erwachsene', 'games', 'sport venues',
'membership fees', 'apps', 'music', 'books'],
"clothes_medicine": ['clothes', 'accessories', 'cosmetics', 'medicine', 'hairdresser',
'medical services', 'medical servies', "shoes"],
"private_devices": ['devices', 'bike', 'bicycle', 'movies & TV', 'mobile phone', 'home improvement',
'internet', 'landline phone', 'furniture'],
"presents": ['birthday', 'X-Mas'],
"other": ['wechsel', 'income tax', 'tuition', 'publications', 'Spende'],
"stocks": ['equity purchase'],
#### Income categories
"compensation_caution": ["Entschädigung"],
"salary": ["Salary", "Gehalt Vorschuss"],
"present": ["Geschenk"],
"tax_compensation": ["Kirchensteuer Erstattung", "Steuerausgleich"],
"investment_profit": ["Investing"]
}
from functools import reduce
category_list = reduce(lambda x, y: x + y, category_dict.values())
### Need another format of the table, fill NaNs with zero and drop level 0 index "Amount"
pivot_init = df.unstack()
pivot_init.fillna(0, inplace=True)
pivot_init.columns = pivot_init.columns.droplevel()
#### Extract expenses and incomes from building-upkeep (caution) when switching flats
if 'building upkeep' in pivot_init.columns:
building_upkeep = pivot_init['building upkeep']
pivot_init.drop(columns=['building upkeep'], inplace=True)
elif 'Wechsel' in pivot_init.columns:
building_upkeep = pivot_init['Wechsel']
pivot_init.drop(columns=['Wechsel'], inplace=True)
else:
building_upkeep = None
### Apply custom category definition to dataframe
not_categorized = [tag for tag in pivot_init.columns if tag not in category_list]
assert len(not_categorized) == 0, "There are some tags, which are not yet categorized: {}".format(not_categorized)
pivot = pivot_init.copy()
for category, tag_list in category_dict.items():
tag_list_in_data = list(set(tag_list).intersection(set(pivot.columns)))
pivot[category] = pivot[tag_list_in_data].sum(axis=1)
pivot.drop(columns=tag_list_in_data, inplace=True)
### Keep only categories with non-zero total amount in dataframe
category_sum = pivot.sum().reset_index()
nonzero_categories = list(category_sum[category_sum[0] != 0.]["Tags"])
pivot = pivot[nonzero_categories]
return((building_upkeep, pivot))
def combine_incomes(toshl_income, excel_income):
"""
Combines two data sources of incomes: toshl incomes and incomes from cashflow excel.
:param toshl_income: Preprocessed dataframe of toshl incomes (after cleaning and splitting)
:param excel_income: Raw excel income data
:return: Total income data
"""
df_in = toshl_income.reset_index().copy()
df_in["Tags"] = df_in["Tags"].apply(lambda x: "Salary" if x in ["Privat", "NHK", "OL"] else x)
df_in2 = excel_income.copy()
df_in2 = df_in2[["Datum", "Art", "Betrag"]].rename(columns={"Datum": "Date",
"Art": "Tags",
"Betrag": "Amount"}).dropna()
df_in2["Date"] = pd.to_datetime(df_in2["Date"], format="%d.%m.%Y")
df_in2["Tags"] = df_in2["Tags"].apply(lambda x: "Salary" if x in ["Gehalt", "Sodexo"] else x)
df_income = pd.concat([df_in, df_in2], ignore_index=True)
assert df_income.count()[0] == df_in.count()[0] + df_in2.count()[0], "Some income rows were lost!"
df_income = df_income.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
return(df_income)
def preprocess_prices(df_prices: pd.DataFrame) -> pd.DataFrame:
"""
Preprocessing of price dataframe. Get latest available price.
:param df_prices: Needed columns: ISIN, Price, Date, Currency
:return: dataframe containing prices of stocks defined by ISIN on latest available date
"""
dfp = df_prices.copy()
assert dfp["Currency"].drop_duplicates().count() == 1, "Multiple currencies used for price data!"
assert dfp["Currency"].iloc[0] == "EUR", "Currency is not Euro!"
dfp["Date"] = pd.to_datetime(dfp["Date"], format="%d.%m.%Y")
latest_date = dfp["Date"].max()
df_current_prices = dfp[dfp["Date"] == latest_date].reset_index(drop=True)
return(df_current_prices)
def preprocess_orders(df_orders: pd.DataFrame) -> pd.DataFrame:
"""
Set datatypes of columns and split input into dividends transactions and savings-plan transactions.
:param df_orders: Includes all transaction data of the portfolio, all columns in list portfolio_columns
need to be present, Kommentar column needs to be either "monatlich" (transaction of the
savings plan, an ETF is bought) or "Dividende" (income)
:return: tuple of orders- and dividend transaction entries
"""
orders_portfolio = df_orders.copy()
portfolio_columns = ["Index", "Datum", "Kurs", "Betrag", "Kosten", "Anbieter", "Name", "ISIN"]
new_portfolio_columns = ["Index", "Date", "Price", "Investment", "Ordercost", "Depotprovider", "Name", "ISIN"]
rename_columns = {key: value for key, value in zip(portfolio_columns, new_portfolio_columns)}
orders_portfolio = orders_portfolio.rename(columns=rename_columns)
assert set(orders_portfolio.columns).intersection(set(new_portfolio_columns)) == set(new_portfolio_columns), \
"Some necessary columns are missing in the input dataframe!"
### Keep only valid entries
orders_portfolio = orders_portfolio[~orders_portfolio["Investment"].isna()]
orders_portfolio = orders_portfolio[orders_portfolio["Art"] == "ETF Sparplan"]
orders_portfolio = orders_portfolio[new_portfolio_columns]
orders_portfolio = orders_portfolio[~orders_portfolio["Date"].isna()]
orders_portfolio["Date"] = pd.to_datetime(orders_portfolio["Date"], format="%d.%m.%Y")
orders_portfolio["Index"] = orders_portfolio["Index"].astype(int)
assert (orders_portfolio[orders_portfolio["Investment"] > 0.].count() != 0).any() == False, \
"Positive Einträge im Orderportfolio!"
orders_portfolio["Investment"] = -orders_portfolio["Investment"]
orders_portfolio["Ordercost"] = -orders_portfolio["Ordercost"]
return (orders_portfolio)
def preprocess_etf_masterdata(df_master: pd.DataFrame) -> pd.DataFrame:
"""
Convert columns "physical" and "Acc" to booleans and map all entries in "Region" containing "Emerging" to "Emerging"
:param df_master: Master data of all ETFs, columns in etf_columns are required
:return: preprocessed dataframe
"""
etf_master = df_master.copy()
etf_columns = ["Type", "Name", "ISIN", "Region", "Replikationsmethode", "Ausschüttung", "TER%"]
new_etf_columns = ["Type", "Name", "ISIN", "Region", "Replicationmethod", "Distributing", "TER%"]
etf_master = etf_master.rename(columns={key: value for key, value in zip(etf_columns, new_etf_columns)})
assert set(etf_master.columns).intersection(set(new_etf_columns)) == set(new_etf_columns), \
"Some necessary columns are missing in the input dataframe!"
etf_master = etf_master[new_etf_columns]
etf_master["Replicationmethod"] = etf_master["Replicationmethod"].map(lambda x: "Physical" \
if x[:8] == "Physisch" else "Synthetic")
etf_master["Distributing"] = etf_master["Distributing"].map(lambda x: "Distributing" \
if x == "Ausschüttend" else "Accumulating")
etf_master["Region"] = etf_master["Region"].fillna("").map(lambda x: "Emerging" if "Emerging" in x else x)
return (etf_master)
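# Illustrative sketch (not part of the original module): one hypothetical ETF row showing how
# preprocess_etf_masterdata() renames the German master-data columns and collapses any region
# label containing "Emerging" to "Emerging".
def _example_etf_masterdata():
    df_demo = pd.DataFrame([{
        "Type": "Aktien-ETF",
        "Name": "Demo MSCI Emerging Markets ETF",
        "ISIN": "XX0000000000",
        "Region": "Emerging Markets",
        "Replikationsmethode": "Physisch (optimiert)",
        "Ausschüttung": "Ausschüttend",
        "TER%": 0.2,
    }])
    return preprocess_etf_masterdata(df_demo)  # -> Physical, Distributing, Region "Emerging"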
def preprocess_crypto_data(df_deposits, df_trades_init):
"""
Preprocessing of crypto deposits and trade history. Check if trade data is consistent.
:param df_deposits: Holds all deposits of cryptocurrencies.
:param df_trades_init: Table of all trades between different cryptocurrencies.
:return: tuple of cleaned deposits and trade overview dataframes
"""
df_deposits["date"] = pd.to_datetime(df_deposits["date"])
df_deposits = df_deposits[~df_deposits["currency"].isna()]
price_tolerance = 1e-8
for idx in df_trades_init.index:
if df_trades_init["exchange 1"].iloc[idx] != df_trades_init["exchange 2"].iloc[idx]:
if abs(df_trades_init["amount_spent"].iloc[idx] - df_trades_init["amount_gained"].iloc[idx] -
df_trades_init["fee"].iloc[idx]) > price_tolerance:
print("Error in data! Amount spent does not equal gained with fees!")
gain_columns = {
"date": "date",
"amount_gained": "amount",
"currency gained": "currency",
"exchange 2": "exchange"
}
spent_columns = {
"date": "date",
"amount_spent": "amount",
"currency spent": "currency",
"exchange 1": "exchange"
}
df_trades_cleaned = df_trades_init[gain_columns.keys()].rename(columns=gain_columns)
df_spent = df_trades_init[spent_columns.keys()].rename(columns=spent_columns)
df_spent["amount"] *= -1
df_trades = df_trades_cleaned.append(df_spent, ignore_index=True)
return((df_deposits, df_trades))
def compute_crypto_portfolio(df_deposits_init, df_trades_init):
"""
Combines deposits and trades into a single dataframe and compute portfolio of currencies.
:param df_deposits: preprocessed deposits of cryptocurrencies
:param df_trades: preprocessed trades of cryptocurrencies
:return: portfolio of cryptocurrencies: exchange, currency, amount
"""
df_deposits = df_deposits_init[["exchange", "currency", "amount"]].copy()
df_trades = df_trades_init[["exchange", "currency", "amount"]].copy()
df_all = df_deposits.append(df_trades, ignore_index=True)
crypto_portfolio = df_all.groupby(["exchange", "currency"]).sum().reset_index()
return(crypto_portfolio)
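# Illustrative sketch (not part of the original module): hypothetical deposits and trades that
# compute_crypto_portfolio() nets out per exchange and currency (it relies on the same pandas
# version as the rest of the module, since DataFrame.append is used internally).
def _example_crypto_portfolio():
    deposits_demo = pd.DataFrame({
        "exchange": ["Kraken", "Kraken"],
        "currency": ["BTC", "ETH"],
        "amount": [0.10, 2.00],
    })
    trades_demo = pd.DataFrame({
        "exchange": ["Kraken", "Kraken"],
        "currency": ["BTC", "ETH"],
        "amount": [-0.05, 1.00],
    })
    return compute_crypto_portfolio(deposits_demo, trades_demo)  # BTC 0.05, ETH 3.00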
def compute_crypto_portfolio_value(portfolio: pd.DataFrame, prices: pd.DataFrame) -> pd.DataFrame:
"""
Combines current crypto-price data with portfolio and computes value per exchange/currency.
Adds the overall portfolio value with exchange-name "Overall" to the data.
:param portfolio: Holds crypto portfolio data (exchange, currency, amount)
:param prices: Holds prices and masterdata of cryptos (name, symbol, price)
:return: Value of portfolio per cryptocurrency
"""
portfolio_all = portfolio.merge(prices, left_on="currency", right_on="symbol").copy()
portfolio_all = portfolio_all[["exchange", "currency", "name", "amount", "price"]]
portfolio_all.loc[:, "value"] = round(portfolio_all["amount"] * portfolio_all["price"], 3)
portfolio_all = portfolio_all.drop("price", axis=1)
portfolio_overall = portfolio_all.groupby(["currency", "name"]).sum().reset_index()
portfolio_overall["exchange"] = "Overall"
portfolio_value = portfolio_all.append(portfolio_overall, ignore_index=True, sort=False)
return(portfolio_value)
def enrich_orders(df_orders, df_etf):
"""
Join ETF master data to transaction data of ETFs.
:param df_orders: ETF transaction data
:param df_etf: ETF master data
:return:
"""
join_columns_etf_master = ["ISIN", "Type", "Region", "Replicationmethod", "Distributing", "TER%"]
orders_etf = df_orders.merge(df_etf[join_columns_etf_master].drop_duplicates(),
how="inner",
left_on="ISIN",
right_on="ISIN").copy()
assert (orders_etf[orders_etf["Region"].isna()][["ISIN", "Name"]].drop_duplicates().count() > 0).any() == False, \
"No ETF master data!"
return (orders_etf)
def get_current_portfolio(df_orders: pd.DataFrame) -> pd.DataFrame:
"""
Gets transactions of latest executed monthly savings plan of ETF portfolio.
:param df_orders: ETF transaction data
:return:
"""
portfolio = df_orders.copy()
last_execution_index = portfolio["Index"].max()
portfolio = portfolio[portfolio["Index"] == last_execution_index].reset_index(drop=True).drop("Index", axis=1)
return (portfolio)
def compute_percentage_per_group(df: pd.DataFrame, group_names: list, compute_columns:list, agg_functions:list) -> list:
"""
Computes len(group_names) aggregations of input dataframe df according to the given agg_functions wrt to the
specified columns in compute_columns.
These three lists need to have the same length!
Currently only sum() as aggregate function is available.
:param df: pd.DataFrame, that needs to have all columns specified in group_names, compute_columns
:param group_names: list of grouping columns
:param compute_columns: list of columns along which groupby computation should be done
:param agg_functions: list of aggregate functions, which are applied to compute_columns
:return result_list: list of resulting dataframes after groupby aggregation
"""
all_columns = set(df.columns)
all_needed_columns = set(group_names).union(set(compute_columns))
assert all_columns.intersection(all_needed_columns) == all_needed_columns, "Columns not present!"
assert len(group_names) == len(compute_columns), "Number of grouping columns does not match compute columns!"
assert len(group_names) == len(
agg_functions), "Number of grouping columns does not match number of aggregate functions!"
df_copy = df.copy()
result_list = []
for idx, group in enumerate(group_names):
compute_col = compute_columns[idx]
agg_func = agg_functions[idx]
if agg_func == "sum":
df_grouped = df_copy[[group, compute_col]].groupby([group]).sum()
total_sum = df_copy[compute_col].sum()
df_grouped["Percentage"] = round(df_grouped[compute_col] / total_sum, 3) * 100
result_list.append(df_grouped.reset_index())
return (result_list)
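# Illustrative sketch (not part of the original module): computing the investment share per
# region with compute_percentage_per_group(); the three rows are hypothetical.
def _example_percentage_per_group():
    df_demo = pd.DataFrame({
        "Region": ["World", "Emerging", "Europe"],
        "Investment": [60.0, 25.0, 15.0],
    })
    result_list = compute_percentage_per_group(df_demo,
                                               group_names=["Region"],
                                               compute_columns=["Investment"],
                                               agg_functions=["sum"])
    return result_list[0]  # one row per region with an added "Percentage" column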
def get_portfolio_value(df_trx: pd.DataFrame, df_prices: pd.DataFrame) -> pd.DataFrame:
"""
Computes the current value of each stock given in the transaction list by using most recent price data.
:param df_trx: dataframe containing all transactions
:param df_prices: dataframe containing historic price data
:return:
"""
if (df_trx.isna().sum()>0).any():
print("Some entries contain NaN values! The statistics might be wrong!")
print(df_trx.isna().sum())
needed_columns_trx = set(["Investment", "Price", "ISIN"])
needed_columns_prices = set(["Price", "ISIN"])
assert needed_columns_trx.intersection(set(df_trx.columns)) == needed_columns_trx, \
"One of the following columns are missing in df_trx: {}".format(needed_columns_trx)
assert needed_columns_prices.intersection(set(df_prices.columns)) == needed_columns_prices, \
"One of the following columns are missing in df_prices: {}".format(needed_columns_prices)
df = df_trx.copy()
dfp = df_prices.copy()
### Compute amount of stocks bought
df["Amount"] = df["Investment"] / df["Price"]
### Drop price of orderdata, which is the price at which a stock was bought --> here we use the current price
df = df.drop("Price", axis=1)
df_portfolio = df.merge(dfp, how="left", left_on="ISIN", right_on="ISIN", suffixes=["", "_y"])\
.rename(columns={"Date_y": "last_price_update"})
assert (df_portfolio["Price"].isna().sum()>0).any() == False, "Prices are missing for a transaction!"
df_portfolio["Value"] = round(df_portfolio["Amount"] * df_portfolio["Price"], 2)
return (df_portfolio)
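# Illustrative sketch (not part of the original module): one hypothetical position valued with a
# hypothetical current price via get_portfolio_value() -- 100 EUR bought at 50 EUR gives 2 shares,
# worth 110 EUR at a current price of 55 EUR.
def _example_portfolio_value():
    trx_demo = pd.DataFrame({
        "Date": pd.to_datetime(["2021-01-15"]),
        "Name": ["Demo ETF"],
        "ISIN": ["XX0000000000"],
        "Investment": [100.0],
        "Price": [50.0],
        "Ordercost": [1.5],
    })
    prices_demo = pd.DataFrame({
        "Date": pd.to_datetime(["2021-06-30"]),
        "ISIN": ["XX0000000000"],
        "Price": [55.0],
    })
    return get_portfolio_value(trx_demo, prices_demo)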
def filter_portfolio_date(portfolio: pd.DataFrame, offset_months: int) -> pd.DataFrame:
"""
    Filters the dataframe portfolio to all entries that occur after today's date minus offset_months.
    :param portfolio: Needs column Date
    :param offset_months: Number of months into the past that the output dataframe should contain; -1 keeps all entries.
    :return: dataframe filtered up to offset_months into the past
"""
from datetime import date
assert "Date" in portfolio.columns, 'Column "Date" is missing in input dataframe!'
date_today = pd.Timestamp(date.today())
if offset_months == -1:
return(portfolio)
else:
date_offset = pd.DateOffset(months=offset_months)
portfolio_date_filtered = portfolio[portfolio["Date"] >= date_today - date_offset]
return(portfolio_date_filtered)
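# Illustrative usage sketch (added, not part of the original module): filter a transaction dataframe
# to the last 6 months. The dates are generated relative to today, so the example is self-contained.
def _demo_filter_portfolio_date():
    today = pd.Timestamp.today().normalize()
    sample = pd.DataFrame({
        "Date": [today - pd.DateOffset(months=12), today - pd.DateOffset(months=3), today],
        "Investment": [100.0, 100.0, 100.0],
    })
    recent = filter_portfolio_date(sample, offset_months=6)
    print(recent)  # only the rows from the last 6 months remain
    print(filter_portfolio_date(sample, offset_months=-1))  # -1 keeps everything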
def filter_portfolio_stock(portfolio: pd.DataFrame, stock_name: str) -> pd.DataFrame:
"""
Filters the dataframe, portfolio, to the given stock_name.
:param portfolio: Dataframe holding transactions
:param stock_name: Name of the stock, to which the dataframe should be filtered.
:return: dataframe filtered on the specified stock name
"""
assert "Name" in portfolio.columns, 'Column "Name" is missing in input dataframe!'
return(portfolio[portfolio["Name"] == stock_name])
def prepare_orderAmounts_prices(orders: pd.DataFrame):
"""
    Extracts a dataframe of buy prices for each stock at each date. Additionally prepares the order dataframe
    with the number of shares bought at each date.
    :param orders: Holds price and investment amount data for each stock at every date.
    :return: Tuple of orders (including number of shares) and prices.
"""
prices = orders[["Date", "Name", "Price"]]
necessary_columns = ["Date", "Name", "Investment", "Ordercost", "Amount"]
df_orders = orders.drop_duplicates().copy()
df_orders["Amount"] = df_orders["Investment"] / df_orders["Price"]
df_orders = df_orders[necessary_columns]
return((df_orders, prices))
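# Illustrative usage sketch (added, not part of the original module): split an order dataframe into
# the share amounts per order and a separate price table. The column values are invented sample data.
def _demo_prepare_orderAmounts_prices():
    orders = pd.DataFrame({
        "Date": pd.to_datetime(["2021-01-01", "2021-02-01"]),
        "Name": ["ETF A", "ETF A"],
        "Investment": [100.0, 100.0],
        "Ordercost": [1.5, 1.5],
        "Price": [50.0, 40.0],
    })
    df_orders, prices = prepare_orderAmounts_prices(orders)
    print(df_orders)  # contains Amount = Investment / Price (2.0 and 2.5 shares)
    print(prices)     # Date/Name/Price lookup table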
def prepare_timeseries(orders: pd.DataFrame):
"""
    Computes the timeseries data (value/investment vs. date) for all stocks in the portfolio.
    Also computes the timeseries for the overall portfolio (sum of all stock values at a given date) and adds it
    to the dataframe.
    :param orders: dataframe containing investment amount, ordercost and price for each stock per transaction date
    :return: timeseries data per stock and for the overall portfolio
"""
necessary_columns = ["Date", "Name", "Investment", "Price", "Ordercost"]
assert set(orders.columns).intersection(set(necessary_columns)) == set(necessary_columns), \
"Necessary columns missing in order data for timeseries preparation!"
orders["Amount"] = orders["Investment"]/orders["Price"]
### Map each transaction-date to the beginning of the month for easier comparison
orders["Date"] = orders["Date"].apply(lambda date: | pd.offsets.MonthBegin() | pandas.offsets.MonthBegin |
import http.client
from datetime import datetime
import json
import pandas as pd
API = '<KEY>'
def str2time(strng):
return datetime.strptime(strng, '%H:%M:%S').time()
def str2date(strng):
return datetime.strptime(strng, '%Y-%m-%d').date()
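# Small usage sketch (added, not part of the original script): the API returns UTC timestamps like
# "2021-08-14T14:00:00Z", which are split into date and time parts before parsing (see match_json2csv below).
def _demo_str2_helpers():
    utc_date = "2021-08-14T14:00:00Z"
    date_part, time_part = utc_date.split("T")
    print(str2date(date_part))                   # datetime.date(2021, 8, 14)
    print(str2time(time_part.split("Z")[0]))     # datetime.time(14, 0)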
def create_db_connection():
DB_USER = 'root'
DB_PASS = 'password'
DB_HOST = 'localhost'
DB_NAME = 'betmeback'
try:
from sqlalchemy import create_engine
ENGINE = create_engine(
'mysql+mysqldb://{0}:{1}@{2}/{3}?charset=utf8mb4'.format(
DB_USER, DB_PASS, DB_HOST, DB_NAME),
encoding='utf-8',
convert_unicode=True,
)
    except Exception as e:
        print(e)
        return None
return ENGINE
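# Usage sketch (added, not part of the original script): how the engine might be used to load a table
# into pandas. This assumes a local MySQL instance with the credentials hard-coded above; the table
# name "matches" is only a placeholder, adjust it to whatever exists in your schema.
def _demo_read_table(table_name="matches"):
    engine = create_db_connection()
    if engine is None:
        print("No database connection available.")
        return None
    return pd.read_sql("SELECT * FROM {}".format(table_name), con=engine)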
def league_json2csv(Response):
columns = ['league_id', 'caption', 'league', 'start_date', 'end_date',
'number_of_teams', 'number_of_matches', 'current_matchday']
league_id = Response['id']
competition_code = Response['code']
caption = Response['area']['name']+" "+Response['name']
league = Response['code']
start_date = Response['currentSeason']['startDate']
end_date = Response['currentSeason']['endDate']
number_of_teams = get_team_number(league)
number_of_matches = get_match_number(league)
current_matchday = Response['currentSeason']['currentMatchday']
with open(competition_code+"_leagues.csv", "w") as cf:
cf.writelines(','.join(columns)+"\n")
cf.writelines(','.join(str(k) for k in [
league_id, caption, league, start_date, end_date, number_of_teams, number_of_matches, current_matchday])+'\n')
print(','.join(str(k) for k in [league_id, caption, league, start_date,
end_date, number_of_teams, number_of_matches, current_matchday])+'\n')
def team_json2csv(Response):
columns = ['betradar_id', 'league_id', 'name',
'short_name', 'crest_url', 'team_abbreviation']
league_id = Response['competition']['id']
competition_code = Response['competition']['code']
teams = Response['teams']
with open(competition_code+"_teams.csv", "w") as cf:
cf.writelines(','.join(columns)+"\n")
for team in teams:
ln = list()
ln.append(team["id"])
ln.append(league_id)
ln.append(team["name"])
ln.append(team["shortName"])
ln.append(team["crestUrl"])
ln.append(team["tla"])
print(','.join(str(i) for i in ln))
cf.writelines(str(','.join(str(i) for i in ln))+"\n")
print("\t\tDone!")
def match_json2csv(Response):
columns = ['match_id', 'match_date', 'match_time', 'home_team_id', 'home_team_name', 'away_team_id',
'away_team_name', 'league_id', 'match_day', 'home_team_score', 'away_team_score', 'status', 'Winner']
matches = Response['matches']
# nmatches = len(matches)
competition_code = Response['competition']['code']
league_id = Response['competition']['id'].__str__()
with open(competition_code+".csv", 'w') as cf:
cf.writelines(','.join(columns)+"\n")
for match in matches:
ln = list()
ln.append(match['id'])
ln.append(str2date(match['utcDate'].split('T')[0]))
ln.append(str2time(match['utcDate'].split('T')[1].split('Z')[0]))
ln.append(match['homeTeam']['id'].__str__())
ln.append(match['homeTeam']['name'])
ln.append(match['awayTeam']['id'].__str__())
ln.append(match['awayTeam']['name'])
ln.append(league_id)
ln.append(match['matchday'])
ln.append(match['score']['fullTime']['homeTeam'].__str__())
ln.append(match['score']['fullTime']['awayTeam'].__str__())
ln.append(match['status'].__str__())
ln.append(match['score']['winner'])
print(','.join(str(i) for i in ln))
cf.writelines(str(','.join(str(i) for i in ln))+"\n")
print("\t\tDone!")
cf.close()
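# Usage sketch (added, not part of the original script): one way the Response dict consumed above can be
# fetched. The host, path, and header follow the football-data.org v2 API, which the expected structure
# (Response['competition'], Response['matches']) suggests; verify against the current API documentation.
def _demo_fetch_and_export_matches(competition_code="PL"):
    conn = http.client.HTTPSConnection("api.football-data.org")
    conn.request("GET", "/v2/competitions/{}/matches".format(competition_code),
                 headers={"X-Auth-Token": API})
    response = json.loads(conn.getresponse().read().decode("utf-8"))
    match_json2csv(response)  # writes e.g. PL.csv next to the script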
def unify_teams_csv():
team_csv = ["PL_teams.csv", "BL1_teams.csv",
"SA_teams.csv", "PD_teams.csv", "CL_teams.csv"]
dfs = [ | pd.read_csv(fl) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import logging
import warnings
import os
import pandas_datareader as pdr
from collections import Counter
from scipy import stats
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_percentage_error, mean_absolute_error
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
logging.basicConfig(filename='warnings.log',level=logging.WARNING)
logging.captureWarnings(True)
warnings.simplefilter("ignore")
def mape(y,pred):
    return None if 0 in y else mean_absolute_percentage_error(y,pred) # MAPE is undefined when an actual value is 0, so return None in that case to avoid division by zero
def rmse(y,pred):
return mean_squared_error(y,pred)**.5
def mae(y,pred):
return mean_absolute_error(y,pred)
def r2(y,pred):
return r2_score(y,pred)
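# Quick illustration (added, not part of the original module) of the four error metrics defined above,
# using a tiny hand-made actual/predicted pair.
def _demo_metrics():
    actual = [10.0, 12.0, 14.0]
    predicted = [11.0, 12.0, 13.0]
    print("RMSE:", rmse(actual, predicted))   # ~0.816
    print("MAE :", mae(actual, predicted))    # ~0.667
    print("R2  :", r2(actual, predicted))     # 0.75
    print("MAPE:", mape(actual, predicted))   # ~0.057 (sklearn returns a fraction, not a percent)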
_estimators_ = {'arima', 'mlr', 'mlp', 'gbt', 'xgboost', 'rf', 'prophet', 'hwes', 'elasticnet','svr','knn','combo'}
_metrics_ = {'r2','rmse','mape','mae'}
_determine_best_by_ = {'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','InSampleRMSE','InSampleMAPE','InSampleMAE',
'InSampleR2','ValidationMetricValue','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE',
'LevelTestSetR2',None}
_colors_ = [
'#FFA500','#DC143C','#00FF7F','#808000','#BC8F8F','#A9A9A9',
'#8B008B','#FF1493','#FFDAB9','#20B2AA','#7FFFD4','#A52A2A',
'#DCDCDC','#E6E6FA','#BDB76B','#DEB887'
]*10
class ForecastError(Exception):
class CannotUndiff(Exception):
pass
class NoGrid(Exception):
pass
class PlottingError(Exception):
pass
class Forecaster:
def __init__(self,
y=pd.Series([]),
current_dates=pd.Series([]),
**kwargs):
self.y = y
self.current_dates = current_dates
self.future_dates = pd.Series([])
self.current_xreg = {} # values should be pandas series (to make differencing work more easily)
self.future_xreg = {} # values should be lists (to make iterative forecasting work more easily)
self.history = {}
self.test_length = 1
self.validation_length = 1
self.validation_metric = 'rmse'
self.integration = 0
for key, value in kwargs.items():
setattr(self,key,value)
self.typ_set() # ensures that the passed values are the right types
def __str__(self):
models = self.history.keys()
if len(models) == 0:
first_prt = 'Forecaster object with no models evaluated.'
else:
first_prt = 'Forecaster object with the following models evaluated: {}.'.format(', '.join(models))
whole_thing = first_prt + ' Data starts at {}, ends at {}, loaded to forecast out {} periods, has {} regressors.'.format(self.current_dates.min(),self.current_dates.max(),len(self.future_dates),len(self.current_xreg.keys()))
return whole_thing
def __repr__(self):
if len(self.history.keys()) > 0:
return self.export('model_summaries')
return self.history
def _adder(self):
assert len(self.future_dates) > 0,'before adding regressors, please make sure you have generated future dates by calling generate_future_dates(), set_last_future_date(), or ingest_Xvars_df(use_future_dates=True)'
def _bank_history(self,**kwargs):
call_me = self.call_me
self.history[call_me] = {
'Estimator':self.estimator,
'Xvars':self.Xvars,
'HyperParams':{k:v for k,v in kwargs.items() if k not in ('Xvars','normalizer','auto')},
'Scaler':kwargs['normalizer'] if 'normalizer' in kwargs.keys() else None if self.estimator in ('prophet','combo') else None if hasattr(self,'univariate') else 'minmax',
'Forecast':self.forecast[:],
'FittedVals':self.fitted_values[:],
'Tuned':kwargs['auto'],
'Integration':self.integration,
'TestSetLength':self.test_length,
'TestSetRMSE':self.rmse,
'TestSetMAPE':self.mape,
'TestSetMAE':self.mae,
'TestSetR2':self.r2,
'TestSetPredictions':self.test_set_pred[:],
'TestSetActuals':self.test_set_actuals[:],
'InSampleRMSE':rmse(self.y.values,self.fitted_values),
'InSampleMAPE':mape(self.y.values,self.fitted_values),
'InSampleMAE':mae(self.y.values,self.fitted_values),
'InSampleR2':r2(self.y.values,self.fitted_values),
}
if kwargs['auto']:
self.history[call_me]['ValidationSetLength'] = self.validation_length
self.history[call_me]['ValidationMetric'] = self.validation_metric
self.history[call_me]['ValidationMetricValue'] = self.validation_metric_value
for attr in ('univariate','first_obs','first_dates','grid_evaluated','models'):
if hasattr(self,attr):
self.history[call_me][attr] = getattr(self,attr)
if self.integration > 0:
first_obs = self.first_obs.copy()
fcst = self.forecast[::-1]
integration = self.integration
y = self.y.to_list()[::-1]
pred = self.history[call_me]['TestSetPredictions'][::-1]
if integration == 2:
first_ = first_obs[1] - first_obs[0]
y.append(first_)
y = list(np.cumsum(y[::-1]))[::-1]
y.append(first_obs[0])
y = list(np.cumsum(y[::-1]))
fcst.append(y[-1])
fcst = list(np.cumsum(fcst[::-1]))[1:]
pred.append(y[-(len(pred) - 1)])
pred = list(np.cumsum(pred[::-1]))[1:]
if integration == 2:
fcst.reverse()
fcst.append(self.y.values[-2] + self.y.values[-1])
fcst = list(np.cumsum(fcst[::-1]))[1:]
pred.reverse()
pred.append(self.y.values[-(len(pred) - 2)] + self.y.values[-(len(pred) - 1)])
pred = list(np.cumsum(pred[::-1]))[1:]
self.history[call_me]['LevelForecast'] = fcst[:]
self.history[call_me]['LevelY'] = y[integration:]
self.history[call_me]['LevelTestSetPreds'] = pred
self.history[call_me]['LevelTestSetRMSE'] = rmse(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetMAPE'] = mape(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetMAE'] = mae(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetR2'] = r2(y[-len(pred):],pred)
else: # better to have these attributes populated for all series
self.history[call_me]['LevelForecast'] = self.forecast[:]
self.history[call_me]['LevelY'] = self.y.to_list()
self.history[call_me]['LevelTestSetPreds'] = self.test_set_pred[:]
self.history[call_me]['LevelTestSetRMSE'] = self.rmse
self.history[call_me]['LevelTestSetMAPE'] = self.mape
self.history[call_me]['LevelTestSetMAE'] = self.mae
self.history[call_me]['LevelTestSetR2'] = self.r2
def _set_summary_stats(self):
results_summary = self.regr.summary()
results_as_html = results_summary.tables[1].as_html()
self.summary_stats = | pd.read_html(results_as_html, header=0, index_col=0) | pandas.read_html |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng - delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([ | pd.Period('2011-01-01') | pandas.Period |
# --------------
import pandas as pd
from sklearn import preprocessing
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from sklearn import cross_validation
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
#path : File path
# read the dataset
dataset = | pd.read_csv(path) | pandas.read_csv |
import numpy as np
import pandas as pd
from . import util as DataUtil
from . import cols as DataCol
"""
The main data loader.
TODO: population & common special dates
"""
class DataCenter:
def __init__(self):
self.__kabko = None
self.__dates_global = pd.DataFrame([], columns=DataCol.DATES_GLOBAL)
self.__dates_local = pd.DataFrame([], columns=DataCol.DATES_LOCAL)
self.__date_names_global = np.array([])
self.__date_names_local = np.array([])
self.__population_global = None
self.__covid_local = None
self.raw_global = None
self.data_global = None
def load_covid_local(
self,
df,
kabko_col="kabko",
date_col="date",
rename_cols={
"infected": DataCol.I,
"infectious": DataCol.I,
"recovered": DataCol.R,
"dead": DataCol.D
},
drop_cols=["infected_total"],
drop_first_col=False,
exclude_kabkos=[
"AWAK BUAH KAPAL",
"RS LAPANGAN INDRAPURA"
]
):
df = df.copy()
labels = [DataCol.I, DataCol.R, DataCol.D]
df.loc[:, date_col] = pd.to_datetime(df[date_col])
drop_cols = [df.columns[0], *drop_cols] if drop_first_col else drop_cols
df.drop(columns=drop_cols, axis=1, inplace=True)
df.drop(df.index[df[kabko_col].isin(exclude_kabkos)], inplace=True)
rename_cols = {
kabko_col: DataCol.KABKO,
date_col: DataCol.DATE,
**rename_cols
}
df.rename(columns=rename_cols, inplace=True)
df.loc[:, labels] = df[labels].astype(DataUtil.DEFAULT_DTYPE)
self.__covid_local = df
self.__kabko = df[kabko_col].unique()
return self.__covid_local
@property
def kabko(self):
if self.__kabko is None:
if self.__covid_local is None:
raise Exception("Please set/load covid data first")
self.load_covid_local(self.__covid_local)
return self.__kabko
@property
def covid_local(self):
return self.__covid_local
def load_vaccine(
self,
df,
date_col="date",
labels_orig=[
"people_vaccinated",
"people_fully_vaccinated"
]
):
if labels_orig:
df = df[[date_col, *labels_orig]]
df = df.copy()
df.loc[:, date_col] = pd.to_datetime(df[date_col])
rename_cols = {
date_col: DataCol.DATE,
**dict(zip(labels_orig, [
DataCol.VAC_PEOPLE,
DataCol.VAC_FULL
]))
}
df.rename(columns=rename_cols, inplace=True)
df.set_index(DataCol.DATE, inplace=True)
self.__vaccine = df
return self.__vaccine
@property
def vaccine(self):
return self.__vaccine
def load_test(
self,
df,
date_col="Date",
label_orig="Cumulative total"
):
if label_orig:
df = df[[date_col, label_orig]]
df = df.copy()
df.loc[:, date_col] = pd.to_datetime(df[date_col])
rename_cols = {
date_col: DataCol.DATE,
label_orig: DataCol.TEST
}
df.rename(columns=rename_cols, inplace=True)
df.set_index(DataCol.DATE, inplace=True)
self.__test = df
return self.__test
@property
def test(self):
return self.__test
def load_covid_global(
self,
df,
date_col="date",
label_orig="total_cases"
):
if label_orig:
df = df[[date_col, label_orig]]
df = df.copy()
df.loc[:, date_col] = pd.to_datetime(df[date_col])
rename_cols = {
date_col: DataCol.DATE,
label_orig: DataCol.I_TOT_GLOBAL
}
df.rename(columns=rename_cols, inplace=True)
df.set_index(DataCol.DATE, inplace=True)
self.__covid_global = df
return self.__covid_global
@property
def covid_global(self):
return self.__covid_global
def load_dates(
self,
df,
name=None,
name_col="name",
start_col="start", end_col="end",
val_col="value"
):
if name is None and DataCol.NAME not in df.columns and name_col not in df.columns:
raise Exception("Provide name argument if dataframe doesn't have name column")
if name is not None and (DataCol.NAME in df.columns or name_col in df.columns):
raise Exception("Dataframe already has name column but name argument was given")
df = DataUtil.prepare_dates(
df,
name_col=name_col,
start_col=start_col,
end_col=end_col,
val_col=val_col
)
if name is not None and DataCol.NAME not in df.columns:
df[DataCol.NAME] = pd.Series(np.array(len(df) * [name]), dtype=str)
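# Dates without a kabko column are stored as global (they apply to every region); dates with a kabko column are stored per region (local)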
if DataCol.KABKO not in df.columns:
df = df[DataCol.DATES_GLOBAL]
self.__dates_global = pd.concat([self.__dates_global, df])
self.__date_names_global = self.__dates_global[DataCol.NAME].unique()
else:
df = df[DataCol.DATES_LOCAL]
self.__dates_local = pd.concat([self.__dates_local, df])
self.__date_names_local= self.__dates_local[DataCol.NAME].unique()
self.__date_names = np.unique(np.concatenate([self.__date_names_global, self.__date_names_local]))
return df
@property
def dates_global(self):
return self.__dates_global
@property
def dates_local(self):
return self.__dates_local
@property
def date_names_global(self):
return self.__date_names_global
@property
def date_names_local(self):
return self.__date_names_local
@property
def date_names(self):
return self.__date_names
def get_covid_kabko(
self,
kabko
):
covid = self.covid_local.loc[
self.covid_local[DataCol.KABKO] == kabko,
[
DataCol.DATE,
*DataCol.IRD
]
].copy()
# del covid["kabko"]
covid.set_index(DataCol.DATE, inplace=True)
covid.sort_index(ascending=True, inplace=True)
return covid
def get_dates_kabko(
self,
kabko
):
dates = pd.concat([
self.dates_global.copy(),
self.dates_local[self.dates_local[DataCol.KABKO] == kabko][DataCol.DATES_GLOBAL]
])
return dates
def load_population(
self,
df,
kabko_col="kabko",
label_orig="semua"
):
if label_orig:
df = df[[kabko_col, label_orig]]
df = df.copy()
rename_cols = {
kabko_col: DataCol.KABKO,
label_orig: DataCol.N
}
df.rename(columns=rename_cols, inplace=True)
self.__population = df
self.__population_global = self.get_population_kabko("INDONESIA")
return self.__population
@property
def population(self):
return self.__population
@property
def population_global(self):
return self.__population_global
def get_population_kabko(
self,
kabko
):
return self.population[self.population[DataCol.KABKO] == kabko][DataCol.N].values[0]
def set_global_ts(
self,
vaccine,
test,
covid_global
):
self.__vaccine = vaccine
self.__test = test
self.__covid_global = covid_global
# Full of defaults
# For custom, DIY
def load_excel(
self,
path,
):
self.load_covid_global( | pd.read_excel(path, sheet_name="covid_indo") | pandas.read_excel |
# coding: utf-8
"""
Loads data from :epkg:`INSEE`.
"""
from pandas import to_datetime
from .pandas_cache import read_csv_cache, geo_read_csv_cache
def data_france_departments(cache='dep_france', metropole=False):
"""
Retrieves data from
`Contours géographiques des départements
<https://www.data.gouv.fr/en/datasets/
contours-geographiques-des-departements/>`_.
:param metropole: only for the metropole
:param cache: cache name
:return: geodataframe
"""
url = ("https://www.data.gouv.fr/en/datasets/r/"
"ed02b655-4307-4db4-b1ca-7939145dc20f")
df = geo_read_csv_cache(cache, url)
if 'id' in df.columns:
df = df.drop('id', axis=1)
if metropole:
codes = [_ for _ in set(df.code_depart) if len(_) < 3]
return df[df.code_depart.isin(codes)]
return df
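# Illustrative usage (not part of the original module):
#     deps = data_france_departments(metropole=True)  # metropolitan departments only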
def data_covid_france_departments_hospitals(
cache='covid_france_hosp', metropole=False):
"""
Retrieves data from
`Données hospitalières relatives à l'épidémie de COVID-19
<https://www.data.gouv.fr/fr/datasets/
donnees-hospitalieres-relatives-a-lepidemie-de-covid-19/>`_.
:param cache: cache name
:param metropole: only for the metropole
:return: dataframe
"""
url = ("https://www.data.gouv.fr/fr/datasets/r/"
"63352e38-d353-4b54-bfd1-f1b3ee1cabd7")
df = read_csv_cache(cache, url, sep=';')
df['jour'] = to_datetime(df['jour'])
if metropole:
codes = [_ for _ in set(df.dep) if len(_) < 3]
return df[df.dep.isin(codes)]
return df
def data_covid_france_departments_tests(
cache='covid_france_test',
metropole=False):
"""
Retrieves data from
`Données relatives aux résultats des tests virologiques COVID-19 SI-DEP
<https://www.data.gouv.fr/fr/datasets/
donnees-relatives-aux-resultats-des-tests-virologiques-covid-19/>`_.
:param cache: cache name
:param metropole: only for the metropole
:return: dataframe
"""
def trylen(v):
try:
return len(v)
except TypeError as e:
raise TypeError("Issue with '{}'".format(v)) from e
url = ("https://www.data.gouv.fr/fr/datasets/r/"
"406c6a23-e283-4300-9484-54e78c8ae675")
df = read_csv_cache(cache, url, sep=';')
df['jour'] = | to_datetime(df['jour']) | pandas.to_datetime |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import json
import logging
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from .visualization_utilize import VisualizationUtilize
from ..data.base import UniformScene
from .. import data as g_data
from GraphicsDL.graphicsutils import g_io, g_str
class AnalysisUtilize(object):
@staticmethod
def path_specifier(tar_path_list, force_refresh):
tar_path = os.path.join(*tar_path_list)
csv_path = '.'.join((tar_path, 'csv'))
png_path = '.'.join((tar_path, 'png'))
csv_path = None if not force_refresh and os.path.exists(csv_path) else csv_path
png_path = None if not force_refresh and os.path.exists(png_path) else png_path
return csv_path, png_path
class UniSceneAnalyzer(object):
def __init__(self, data_source, scenes_path, objects_path, output_dir, data_cfg, samples_list=None):
self.data_source = data_source
self.scenes_path = scenes_path
self.objects_path = objects_path
self.output_dir = output_dir
self.eval_meta_path = '_'.join(scenes_path.split('_')[:-3]) + '.npz'
self.eval_bbox = None
self.cfg = data_cfg
room_size = np.insert(self.cfg.room_size, 1, self.cfg.room_height)
self.room_size, self.room_stride = np.array(room_size), self.cfg.room_stride
self.room_center = self.room_size * [0.5, 0, 0.5]
self.vox_size = (self.room_size / self.room_stride).astype(np.int32)
self.samples_list = samples_list
self.sample_num = 0
self.label_type = self.cfg.label_type
label_type = getattr(g_data, self.label_type.replace('-', '').upper())()
self.label_list, self.short_label_list = label_type.label_id_map_arr(), label_type.label_id_map_arr(True)
self.num_categories = len(self.label_list)
self.color_map = np.concatenate([label_type.color_map_arr(), [[0, 0, 0]]], axis=0)
self.ignored_label = ['void']
self.scenes_list = []
self.wall_list = [9, 10, 6, 5]
self.camera_fov = [8, 6]
self.render_resolution = [640, 480]
room_half_size = self.room_size[0] / 2
cam_height = 1.2 / 3.2 * room_half_size
self.camera_matrix_list = VisualizationUtilize.get_render_camera_parameter(room_size=room_half_size,
cam_height=cam_height)
def load_data(self):
if len(self.scenes_list) != 0:
return
logging.info(f'\t\tData loading')
scenes_zip = g_io.ZipIO(self.scenes_path)
self.samples_list = scenes_zip.namelist()[:10240]
self.sample_num = len(self.samples_list)
for s_name in self.samples_list:
scene_info = UniformScene()
scene_info.load(json.loads(scenes_zip.read(s_name)))
self.scenes_list.append(scene_info)
logging.info(f'\t\t{len(self.scenes_list)} Data loaded')
if self.label_type != self.scenes_list[0].label_type:
logging.warning(f'\t\tLabel type: Uniform scene - {self.scenes_list[0].label_type}, '
f'analyzer - {self.label_type}')
scenes_zip.close()
def co_occupied(self, num_cls, cor_length=None) -> np.ndarray:
cor_length = cor_length if cor_length else num_cls
acc_list = np.zeros([cor_length], dtype=np.int32)
cor_map = np.zeros([cor_length, cor_length], dtype=np.int32)
for scene in self.scenes_list:
scene_label_ids = np.unique([obj.label_id for obj in scene.objects if obj.label not in self.ignored_label])
if len(scene_label_ids) == 0:
continue
cart_indices = np.transpose(np.asarray([x for x in itertools.product(scene_label_ids, scene_label_ids)]))
cor_map[tuple(cart_indices)] = cor_map[tuple(cart_indices)] + 1
acc_list[scene_label_ids] = acc_list[scene_label_ids] + 1
cor_map = cor_map / np.maximum(np.expand_dims(acc_list, axis=-1), 1)
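# Row-normalise by the number of scenes containing class i, so entry (i, j) approximates P(class j present | class i present)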
return cor_map
def obj_number_bincount(self, num_cls: int) -> np.ndarray:
bincount_list = list()
for scene in self.scenes_list:
bincount = np.bincount([obj.label_id for obj in scene.objects if obj.label not in self.ignored_label],
minlength=num_cls)
bincount_list.append(bincount)
return np.asarray(bincount_list, dtype=np.int64)
def vis_3d_analysis(self, force_refresh=False, vis_num=100, stride=1):
self.load_data()
scene_index_list = np.arange(vis_num) * stride
vis_dir = g_str.mkdir_automated(os.path.join(self.output_dir, f'vis_3d'))
objects_zip = g_io.ZipIO(self.objects_path)
for s_i, scene_index in enumerate(scene_index_list):
if s_i >= vis_num:
break
scene = self.scenes_list[scene_index]
vis_path = os.path.join(vis_dir, scene.scene_id)
vox_vis_path = f'{vis_path}_view'
if not force_refresh and os.path.exists(vox_vis_path + '0.png') and os.path.exists(vox_vis_path + '1.png') \
and os.path.exists(vox_vis_path + '2.png') and os.path.exists(vox_vis_path + '3.png'):
continue
objs_points, points_label = scene.parse_scene(objects_zip)
voxel_size = (np.array(self.room_size) / self.room_stride).astype(np.int32)
voxel_room = np.zeros(voxel_size, dtype=np.uint8)
if len(objs_points) > 0:
objs_points = np.concatenate(objs_points, axis=0)
points_label = np.concatenate(points_label, axis=0)
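# Convert point coordinates (metres) to voxel indices via the voxel stride; the +0.1 offset appears to guard
# against floating-point truncation just below integer boundaries, and out-of-range indices are filtered out below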
obj_vox_indices = (objs_points / self.room_stride + 0.1).astype(np.int32)
point_xyz_max = (np.array(self.room_size) / self.room_stride).astype(np.int32)
valid_indices = np.logical_and(np.all(obj_vox_indices >= 0, axis=1),
np.all(obj_vox_indices < point_xyz_max, axis=1))
obj_vox_indices, points_label = obj_vox_indices[valid_indices], points_label[valid_indices]
voxel_room[tuple(np.split(obj_vox_indices, 3, axis=-1))] = np.expand_dims(points_label, axis=-1)
VisualizationUtilize.vol_top_view_visualization(vis_path, voxel_room, colors_map=self.color_map)
g_io.PlyIO().dump_vox(vis_path + '.ply', voxel_room, vox_scale=self.room_stride, colors_map=self.color_map)
VisualizationUtilize.scene_vox_visualization(vis_path + '.ply', vis_path, self.camera_matrix_list,
self.camera_fov, self.render_resolution)
def correlation_analysis(self, force_refresh=False):
csv_path, png_path = AnalysisUtilize.path_specifier((self.output_dir, 'correlation'), force_refresh)
if csv_path is None:
return
self.load_data()
cor_map = self.co_occupied(self.num_categories)
cor_df = pd.DataFrame(cor_map, self.short_label_list, self.short_label_list)
cor_df = cor_df.drop(columns=self.ignored_label, index=self.ignored_label)
cor_df.to_csv(csv_path)
heat_map = sns.heatmap(cor_df, vmin=0., vmax=1., cmap='jet')
heat_map.figure.savefig(png_path, bbox_inches='tight')
plt.close('all')
def object_distribution_analysis(self, force_refresh=False, obj_num_max=20):
csv_path, png_path = AnalysisUtilize.path_specifier((self.output_dir, 'object_distribution'), force_refresh)
if csv_path is None:
return
self.load_data()
scene_obj_num = np.sum(self.obj_number_bincount(self.num_categories), axis=-1, keepdims=False)
scene_obj_num_bc = np.bincount(scene_obj_num, minlength=obj_num_max + 1)
scene_obj_num_bc[obj_num_max] = np.sum(scene_obj_num_bc[obj_num_max:])
scene_obj_num_bc = scene_obj_num_bc[:obj_num_max + 1]
scene_obj_num_df = | pd.DataFrame(scene_obj_num_bc) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Scientific Computing and Visualization with Spyder
Created on Thu May 20 10:17:27 2021
@author: <NAME>
"""
# %% Import libraries
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
import seaborn as sns
# %% Load raw data (parquet)
data = pd.read_parquet("parsed_data_public.parquet")
# %% Let's explore age
print(data.d_age.describe())
age = data.d_age.tolist()
# %% Save some variables and display them in the Variable Explorer
max_age = data.d_age.max()
min_age = data.d_age.min()
# %% Plot age with pandas
data.d_age.plot.hist(bins=25, alpha=0.5)
# %% Plot age with seaborn (and search for help from IPython Console)
sns.histplot(data.d_age, kde=True, bins=25)
plt.show()
# %% Plot age and mean
sns.histplot(data.d_age, kde=True, bins=25)
plt.xlabel('Age')
plt.axvline(data.d_age.mean(), color='k', linestyle='dashed', linewidth=1)
min_ylim, max_ylim = plt.ylim()
plt.text(data.d_age.mean()*1.1, max_ylim*0.9,
'Mean: {:.2f}'.format(data.d_age.mean()))
plt.show()
# %% Demographic variables list
demograph = [v for v in list(data.columns) if v.startswith("d_")]
# %% Cognitive ability questions
# Select the questions for the cognitive ability test (14 questions).
# Add the correct answers in a new column.
test_items = pd.read_csv("test_items.csv")
ca_test = data.copy() # Make a copy of the original dataframe
right_answers = []
for ID, ROW in test_items.iterrows():
right_answers.append(ROW.iloc[ROW["option_correct"] + 2])
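# option_correct indexes the answer options; the +2 offset skips the leading columns of test_items (assumed from the CSV layout)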
test_items["right_answer"] = right_answers
for ID, ROW in test_items.iterrows():
QUESTION = "q" + str(ROW["ID"])
ANSWER = str(ROW["right_answer"])
try:
ca_test.dropna(subset=[QUESTION], inplace=True)
ca_test["resp_" + QUESTION] = ca_test.apply(lambda row: row[QUESTION] == ANSWER, axis=1)
except KeyError:
print(f"{QUESTION} not found.")
# Some answers were not matched above because they are stored numerically rather than as strings; re-cast them and re-check below.
ca_test.q18154 = pd.Series(ca_test.q18154, dtype="int")
ca_test.q18154 = pd.Series(ca_test.q18154, dtype="string")
ca_test.resp_q18154 = ca_test.apply(lambda row: row["q18154"] == "26", axis=1)
ca_test.q255 = | pd.Series(ca_test.q255, dtype="int") | pandas.Series |
"""
July 2021
This code calculates the sand use for concrete and glass production in the building sector in 26 global regions. For the original code & latest updates, see: https://github.com/
The dynamic material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA
The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM
*NOTE: Insert location of GloBus-main folder in 'dir_path' (line 23) before running the code
Software version: Python 3.7
"""
#%% GENERAL SETTING & STATEMENTS
import pandas as pd
import numpy as np
import os
import ctypes
import math
# set current directory
dir_path = ""
os.chdir(dir_path)
# Set general constants
regions = 26 #26 IMAGE regions
building_types = 4 #4 building types: detached, semi-detached, apartments & high-rise
area = 2 #2 areas: rural & urban
materials = 2 #2 materials: Concrete, Glass
inflation = 1.2423 #gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm
# Set Flags for sensitivity analysis
flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data
flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec)
flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal)
flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median)
#%%Load files & arrange tables ----------------------------------------------------
if flag_Mean == 0:
file_addition = ''
elif flag_Mean == 1:
file_addition = '_mean'
elif flag_Mean ==2:
file_addition = '_high'
elif flag_Mean ==3:
file_addition = '_low'
else:
file_addition = '_median'
# Load Population, Floor area, and Service value added (SVA) Database csv-files
pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region)
rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region)
housing_type = pd.read_csv('files_population/Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area)
floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area)
floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27
avg_m2_cap = pd.read_csv('files_floor_area/Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type)
sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0])
sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$
# load material density data csv-files
building_materials_concrete = pd.read_csv('files_material_density/Building_materials_concrete' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
building_materials_glass = pd.read_csv('files_material_density/Building_materials_glass' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
materials_commercial_concrete = pd.read_csv('files_material_density/materials_commercial_concrete' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
materials_commercial_glass = pd.read_csv('files_material_density/materials_commercial_glass' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
# Load fitted regression parameters for comercial floor area estimate
if flag_alpha == 0:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters.csv', index_col = [0])
else:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters_alpha.csv', index_col = [0])
# Ensure full time series for pop & rurpop (interpolation, some years are missing)
rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate()
pop2 = pop.reindex(list(range(1970,2061,1))).interpolate()
# Remove 1st year, to ensure same Table size as floorspace data (from 1971)
pop2 = pop2.iloc[1:]
rurpop2 = rurpop2.iloc[1:]
#pre-calculate urban population
urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop)
# Restructure the tables to regions as columns; for floorspace
floorspace_rur = floorspace.pivot(index="t", columns="Region", values="Rural")
floorspace_urb = floorspace.pivot(index="t", columns="Region", values="Urban")
# Restructuring for square meters (m2/cap)
avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row
avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row
# Restructuring for the Housing types (% of population living in them)
housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels
housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row
housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels
housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row
#%% COMMERCIAL building space demand (stock) calculated from Gompertz curve (fitted, using separate regression model)
# Select gompertz curve paramaters for the total commercial m2 demand (stock)
alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601
beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431
gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415
# find the total commercial m2 stock (in Millions of m2)
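# Gompertz form used below: m2/cap = a * exp(-b * exp(-(c/1000) * SVA_per_capita))
# Exponential-decay alternative (flag_ExpDec == 1): m2/cap = max(0.542, a - b * exp(-(c/1000) * SVA_per_capita))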
commercial_m2_cap = pd.DataFrame(index=range(1971,2061), columns=range(1,27))
for year in range(1971,2061):
for region in range(1,27):
if flag_ExpDec == 0:
commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
else:
commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
# Subdivide the total across Offices, Retail+, Govt+ & Hotels+
commercial_m2_cap_office = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Offices
commercial_m2_cap_retail = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Retail & Warehouses
commercial_m2_cap_hotels = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hotels & Restaurants
commercial_m2_cap_govern = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hospitals, Education, Government & Transportation
minimum_com_office = 25
minimum_com_retail = 25
minimum_com_hotels = 25
minimum_com_govern = 25
for year in range(1971,2061):
for region in range(1,27):
# get the square meter per capita floorspace for 4 commercial applications
office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year]))
retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year]))
hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year]))
govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year]))
#calculate minimum values for later use in historic tail(Region 20: China @ 134 $/cap SVA)
minimum_com_office = office if office < minimum_com_office else minimum_com_office
minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail
minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels
minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern
# Then use the ratio's to subdivide the total commercial floorspace into 4 categories
commercial_sum = office + retail + hotels + govern
commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum)
commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum)
commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum)
commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum)
#%% Add historic tail (1720-1970) + 100 yr initial --------------------------------------------
# load historic population development
hist_pop = pd.read_csv('files_initial_stock/hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University)
# Determine the historical average global trend in floorspace/cap & the regional rural population share based on the first 10 years of IMAGE data (1970-1980)
floorspace_urb_trend_by_region = [0 for j in range(0,26)]
floorspace_rur_trend_by_region = [0 for j in range(0,26)]
rurpop_trend_by_region = [0 for j in range(0,26)]
commercial_m2_cap_office_trend = [0 for j in range(0,26)]
commercial_m2_cap_retail_trend = [0 for j in range(0,26)]
commercial_m2_cap_hotels_trend = [0 for j in range(0,26)]
commercial_m2_cap_govern_trend = [0 for j in range(0,26)]
# For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data
for region in range(1,27):
floorspace_urb_trend_by_year = [0 for i in range(0,10)]
floorspace_rur_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)]
# Get the growth by year (for the first 10 years)
for year in range(1970,1980):
floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2]
floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2]
commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2]
commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2]
commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2]
commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2]
rurpop_trend_by_region[region-1] = ((1-(rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100
floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10
floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10
commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10
commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10
commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10
commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10
# Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr
floorspace_urb_trend_global = (1-(sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum
floorspace_rur_trend_global = (1-(sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum
commercial_m2_cap_office_trend_global = (1-(sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_retail_trend_global = (1-(sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_hotels_trend_global = (1-(sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_govern_trend_global = (1-(sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum
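# The historic tail below back-casts each series geometrically from its 1971 value:
#   value(year) = value(1971) * ((100 - trend_pct)/100) ** (1971 - year)
# e.g. with trend_pct = 1.2 %/yr and 30 m2/cap in 1971, the 1820 value is roughly
#   30 * 0.988**151 ≈ 4.9 m2/cap (before the MAX() floor against the regional minimum is applied).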
# define historic floorspace (1820-1970) in m2/cap
floorspace_urb_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_urb.columns)
floorspace_rur_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_rur.columns)
rurpop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=rurpop.columns)
pop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=pop2.columns)
commercial_m2_cap_office_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_govern.columns)
# Find minimum or maximum values in the original IMAGE data (just for residential; commercial minimum values have been calculated above)
minimum_urb_fs = floorspace_urb.values.min() # Region 20: China
minimum_rur_fs = floorspace_rur.values.min() # Region 20: China
maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa
# Calculate the actual values used between 1820 & 1970, given the trends & the min/max values
for region in range(1,regions+1):
for year in range(1820,1971):
# MAX of 1) the MINimum value & 2) the calculated value
floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
# MIN of 1) the MAXimum value & 2) the calculated value
rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100+rurpop_trend_by_region[region-1])/100)**(1970-year)) # average annual INcrease by region
# just add the tail to the population (no min/max & trend is pre-calculated in hist_pop)
pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970]
urbpop_1820_1970 = 1 - rurpop_1820_1970
# To avoid full model setup in 1820 (all required stock gets built in yr 1) we assume another tail that linearly increases to the 1820 value over a 100 year time period, so 1720 = 0
floorspace_urb_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_urb.columns)
floorspace_rur_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_rur.columns)
rurpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=rurpop.columns)
urbpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=urbpop.columns)
pop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=pop2.columns)
commercial_m2_cap_office_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_govern.columns)
for region in range(1,27):
for time in range(1721,1820):
# MAX(0,...) Because of floating point deviations, leading to negative stock in some cases
floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time))
floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time))
rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time))
urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time))
pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time))
commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time))
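# The 1721-1820 ramp above is linear in time: value(t) = value(1820) * (1 - (1820 - t)/100),
# so each series would reach exactly 0 in the (implicit) year 1720 and 1% of its 1820 level in 1721.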
# combine historic with IMAGE data here
rurpop_tail = rurpop_1820_1970.append(rurpop2, ignore_index=False)
urbpop_tail = urbpop_1820_1970.append(urbpop, ignore_index=False)
pop_tail = pop_1820_1970.append(pop2, ignore_index=False)
floorspace_urb_tail = floorspace_urb_1820_1970.append(floorspace_urb, ignore_index=False)
floorspace_rur_tail = floorspace_rur_1820_1970.append(floorspace_rur, ignore_index=False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index=False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index=False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index=False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index=False)
rurpop_tail = rurpop_1721_1820.append(rurpop_1820_1970.append(rurpop2, ignore_index=False), ignore_index=False)
urbpop_tail = urbpop_1721_1820.append(urbpop_1820_1970.append(urbpop, ignore_index=False), ignore_index=False)
pop_tail = pop_1721_1820.append(pop_1820_1970.append(pop2, ignore_index=False), ignore_index=False)
floorspace_urb_tail = floorspace_urb_1721_1820.append(floorspace_urb_1820_1970.append(floorspace_urb, ignore_index=False), ignore_index=False)
floorspace_rur_tail = floorspace_rur_1721_1820.append(floorspace_rur_1820_1970.append(floorspace_rur, ignore_index=False), ignore_index=False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1721_1820.append(commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index=False), ignore_index=False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1721_1820.append(commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index=False), ignore_index=False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1721_1820.append(commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index=False), ignore_index=False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1721_1820.append(commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index=False), ignore_index=False)
#%% SQUARE METER Calculations -----------------------------------------------------------
# adjust the shares for urban/rural separately (shares in the csv are percentages of the total (Rur + Urb); the urban shares are rescaled to add up to 1, and likewise for rural)
housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum()
housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum()
# calculate the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas)
people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
people_urb = pd.DataFrame(urbpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
# calculate the total number of people (urban/rural) BY HOUSING TYPE (the sum of det,sem,app & hig equals the total population e.g. people_rur)
people_det_rur = pd.DataFrame(housing_type_rur3.iloc[0].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_sem_rur = pd.DataFrame(housing_type_rur3.iloc[1].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_app_rur = pd.DataFrame(housing_type_rur3.iloc[2].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_hig_rur = pd.DataFrame(housing_type_rur3.iloc[3].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_det_urb = pd.DataFrame(housing_type_urb3.iloc[0].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_sem_urb = pd.DataFrame(housing_type_urb3.iloc[1].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_app_urb = pd.DataFrame(housing_type_urb3.iloc[2].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_hig_urb = pd.DataFrame(housing_type_urb3.iloc[3].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
# calculate the total m2 (urban/rural) BY HOUSING TYPE (= nr. of people * OWN avg m2, so not based on IMAGE)
m2_unadjusted_det_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[0].values * people_det_rur.values, columns=people_det_rur.columns, index=people_det_rur.index)
m2_unadjusted_sem_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[1].values * people_sem_rur.values, columns=people_sem_rur.columns, index=people_sem_rur.index)
m2_unadjusted_app_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[2].values * people_app_rur.values, columns=people_app_rur.columns, index=people_app_rur.index)
m2_unadjusted_hig_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[3].values * people_hig_rur.values, columns=people_hig_rur.columns, index=people_hig_rur.index)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
folder_path_txt = "hidden/box_folder_path.txt"
with open(folder_path_txt) as f:
content = f.readlines()
content = [x.strip() for x in content]
box_folder_path = content[0]
file_path = "/data/d_traj.csv"
df = pd.read_csv(box_folder_path + file_path)
start = 0
stop = 600
time = df['index'].values[start:stop]
dist = np.clip(pd.to_numeric(df['load'], errors='coerce'), 0, None)  # clip bounds (0, None) are an assumption; the original arguments were truncated
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simulation framework for testing LDSC
Models for SNP effects:
- Infinitesimal (can simulate n correlated traits)
- Spike & slab (can simulate up to 2 correlated traits)
- Annotation-informed
Features:
- Field aggregation tools for annotation-informed model and
population stratification with many covariates.
- Automatic adjustment of genetic correlation parameters
to allow for the joint simulation of up to 100 randomly
correlated phenotypes.
- Methods for binarizing phenotypes to have a certain prevalence
and for adding ascertainment bias to binarized phenotypes.
@author: nbaya
"""
import hail as hl
from hail import dtype
from hail.typecheck import typecheck, oneof, nullable
from hail.expr.expressions import expr_float64, expr_int32, expr_array, expr_call
from hail.matrixtable import MatrixTable
from hail.table import Table
from hail.utils.java import Env
import numpy as np
import pandas as pd
import random
import string
import scipy.stats as stats
@typecheck(mt=MatrixTable,
genotype=oneof(expr_int32,
expr_float64,
expr_call),
h2=(oneof(float,
int,
list,
np.ndarray)),
pi=nullable(oneof(float,
int,
list,
np.ndarray)),
rg=nullable(oneof(float,
int,
list,
np.ndarray)),
annot=nullable(oneof(expr_float64,
expr_int32)),
popstrat=nullable(oneof(expr_int32,
expr_float64)),
popstrat_var=nullable(oneof(float,
int)),
exact_h2=bool)
def simulate_phenotypes(mt, genotype, h2, pi=None, rg=None, annot=None, popstrat=None,
popstrat_var=None, exact_h2=False):
r"""Simulate phenotypes for testing LD score regression.
Simulates betas (SNP effects) under the infinitesimal, spike & slab, or
annotation-informed models, depending on parameters passed. Optionally adds
population stratification.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` containing genotypes to be used. Also should contain
variant annotations as row fields if running the annotation-informed
model or covariates as column fields if adding population stratification.
genotype : :class:`.Expression` or :class:`.CallExpression`
Entry field containing genotypes of individuals to be used for the
simulation.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`
SNP-based heritability of simulated trait.
pi : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Probability of SNP being causal when simulating under the spike & slab
model.
rg : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Genetic correlation between traits.
annot : :class:`.Expression`, optional
Row field to use as our aggregated annotations.
popstrat: :class:`.Expression`, optional
Column field to use as our aggregated covariates for adding population
stratification.
exact_h2: :obj:`bool`, optional
Whether to exactly simulate ratio of variance of genetic component of
phenotype to variance of phenotype to be h2. If `False`, ratio will be
h2 in expectation. Observed h2 in the simulation will be close to
expected h2 for large-scale simulations.
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` with simulated betas and phenotypes, simulated according
to specified model.
"""
h2 = h2.tolist() if type(h2) is np.ndarray else ([h2] if type(h2) is not list else h2)
pi = pi.tolist() if type(pi) is np.ndarray else pi
uid = Env.get_uid(base=100)
mt = annotate_all(mt=mt,
row_exprs={} if annot is None else {'annot_'+uid: annot},
col_exprs={} if popstrat is None else {'popstrat_'+uid: popstrat},
entry_exprs={'gt_'+uid: genotype.n_alt_alleles() if genotype.dtype is dtype('call') else genotype})
mt, pi, rg = make_betas(mt=mt,
h2=h2,
pi=pi,
annot=None if annot is None else mt['annot_'+uid],
rg=rg)
mt = calculate_phenotypes(mt=mt,
genotype=mt['gt_'+uid],
beta=mt['beta'],
h2=h2,
popstrat=None if popstrat is None else mt['popstrat_'+uid],
popstrat_var=popstrat_var,
exact_h2=exact_h2)
mt = annotate_all(mt=mt,
global_exprs={'ldscsim': hl.struct(**{'h2': h2[0] if len(h2)==1 else h2,
**({} if pi == [None] else {'pi': pi}),
**({} if rg == [None] else {'rg': rg[0] if len(rg)==1 else rg}),
**({} if annot is None else {'is_annot_inf': True}),
**({} if popstrat is None else {'is_popstrat_inf': True}),
**({} if popstrat_var is None else {'popstrat_var': popstrat_var}),
'exact_h2': exact_h2
})})
mt = _clean_fields(mt, uid)
return mt
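# Example usage (a sketch, left as comments; `mt` stands for any MatrixTable with a call entry field `GT`):
#   sim_mt = simulate_phenotypes(mt, genotype=mt.GT, h2=0.3, pi=0.01)        # single-trait spike & slab
#   sim_mt = simulate_phenotypes(mt, genotype=mt.GT, h2=[0.3, 0.4], rg=0.5)  # two correlated infinitesimal traits
# Simulated SNP effects are stored in the row field `beta`, and the simulation parameters in the
# `ldscsim` global annotation.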
@typecheck(mt=MatrixTable,
h2=(oneof(float,
int,
list,
np.ndarray)),
pi=nullable(oneof(float,
int,
list,
np.ndarray)),
annot=nullable(oneof(expr_float64,
expr_int32)),
rg=nullable(oneof(float,
int,
list,
np.ndarray)))
def make_betas(mt, h2, pi=None, annot=None, rg=None):
r"""Generates betas under different models.
Simulates betas (SNP effects) under the infinitesimal, spike & slab, or
annotation-informed models, depending on parameters passed.
Parameters
----------
mt : :class:`.MatrixTable`
MatrixTable containing genotypes to be used. Also should contain
variant annotations as row fields if running the annotation-informed
model or covariates as column fields if adding population stratification.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`
SNP-based heritability of simulated trait(s).
pi : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Probability of SNP being causal when simulating under the spike & slab
model. If doing two-trait spike & slab `pi` is a list of probabilities for
overlapping causal SNPs (see docstring of :func:`.multitrait_ss`)
annot : :class:`.Expression`, optional
Row field of aggregated annotations for annotation-informed model.
rg : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Genetic correlation between traits.
Returns
-------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with betas as a row field, simulated according to specified model.
pi : :obj:`list`
Probability of a SNP being causal for different traits, possibly altered
from input `pi` if covariance matrix for multitrait simulation was not
positive semi-definite.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix for multitrait simulation was not positive semi-definite.
"""
h2 = h2.tolist() if type(h2) is np.ndarray else (
[h2] if type(h2) is not list else h2)
pi = pi.tolist() if type(pi) is np.ndarray else (
[pi] if type(pi) is not list else pi)
rg = rg.tolist() if type(rg) is np.ndarray else (
[rg] if type(rg) is not list else rg)
assert (all(x >= 0 and x <= 1 for x in h2)
), 'h2 values must be between 0 and 1'
    assert (pi == [None]) or all(
        x >= 0 and x <= 1 for x in pi), 'pi values for spike & slab must be between 0 and 1'
assert (rg == [None] or all(x >= -1 and x <= 1 for x in rg)
), 'rg values must be between -1 and 1 or None'
if annot is not None: # multi-trait annotation-informed
assert rg == [
None], 'Correlated traits not supported for annotation-informed model'
h2 = h2 if type(h2) is list else [h2]
        M = mt.count_rows()  # number of SNPs
        annot_sum = mt.aggregate_rows(hl.agg.sum(annot))
        mt = mt.annotate_rows(beta=hl.literal(h2).map(
            lambda x: hl.rand_norm(0, hl.sqrt(annot*x/(annot_sum*M)))))
elif len(h2) > 1 and (pi == [None] or pi == [1]): # multi-trait correlated infinitesimal
mt, rg = multitrait_inf(mt=mt,
h2=h2,
rg=rg)
elif len(h2) == 2 and len(pi) > 1 and len(rg)==1: # two trait correlated spike & slab
print('multitrait ss')
mt, pi, rg = multitrait_ss(mt=mt,
h2=h2,
                                   rg=0 if rg == [None] else rg[0],
pi=pi)
elif len(h2) == 1 and len(pi) == 1: # single trait infinitesimal/spike & slab
M = mt.count_rows()
pi_temp = 1 if pi == [None] else pi[0]
mt = mt.annotate_rows(beta=hl.rand_bool(
pi_temp)*hl.rand_norm(0, hl.sqrt(h2[0]/(M*pi_temp))))
else:
raise ValueError('Parameters passed do not match any models.')
return mt, pi, rg
@typecheck(mt=MatrixTable,
h2=nullable(oneof(float,
int,
list,
np.ndarray)),
rg=nullable(oneof(float,
int,
list)),
cov_matrix=nullable(np.ndarray),
seed=nullable(int))
def multitrait_inf(mt, h2=None, rg=None, cov_matrix=None, seed=None):
r"""Generates correlated betas for multi-trait infinitesimal simulations for
any number of phenotypes.
Parameters
----------
mt : :class:`.MatrixTable`
MatrixTable for simulated phenotype.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Desired SNP-based heritability (:math:`h^2`) of simulated traits.
If `h2` is ``None``, :math:`h^2` is based on diagonal of `cov_matrix`.
rg : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Desired genetic correlation (:math:`r_g`) between simulated traits.
If simulating more than two correlated traits, `rg` should be a list
of :math:`rg` values corresponding to the upper right triangle of the
covariance matrix. If `rg` is ``None`` and `cov_matrix` is ``None``, :math:`r_g`
is assumed to be 0 between traits. If `rg` and `cov_matrix` are both
not None, :math:`r_g` values from `cov_matrix` take precedence.
cov_matrix : :class:`numpy.ndarray`, optional
Covariance matrix for traits, **unscaled by :math:`M`**, the number of SNPs.
Overrides `h2` and `rg` even when `h2` or `rg` are not ``None``.
seed : :obj:`int`, optional
Seed for random number generator. If `seed` is ``None``, `seed` is set randomly.
Returns
-------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with simulated SNP effects as a row field of arrays.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix was not positive semi-definite.
"""
uid = Env.get_uid(base=100)
h2 = (h2.tolist() if type(h2) is np.ndarray else ([h2] if type(h2) is not list else h2))
rg = rg.tolist() if type(rg) is np.ndarray else ([rg] if type(rg) is not list else rg)
assert (all(x >= 0 and x <= 1 for x in h2)), 'h2 values must be between 0 and 1'
    assert h2 != [None] or cov_matrix is not None, 'h2 and cov_matrix cannot both be None'
seed = seed if seed is not None else int(str(Env.next_seed())[:8])
M = mt.count_rows()
    if cov_matrix is not None:
n_phens = cov_matrix.shape[0]
else:
n_phens = len(h2)
if rg == [None]:
print(f'Assuming rg=0 for all {n_phens} traits')
rg = [0]*int((n_phens**2-n_phens)/2)
    assert (all(x >= -1 and x <= 1 for x in rg)
            ), 'rg values must be between -1 and 1'
cov, rg = get_cov_matrix(h2, rg)
cov = (1/M)*cov
# seed random state for replicability
randstate = np.random.RandomState(int(seed))
betas = randstate.multivariate_normal(
mean=np.zeros(n_phens), cov=cov, size=[M, ])
df = pd.DataFrame([0]*M, columns=['beta'])
tb = hl.Table.from_pandas(df)
tb = tb.add_index().key_by('idx')
tb = tb.annotate(beta=hl.literal(betas.tolist())[hl.int32(tb.idx)])
mt = mt.add_row_index(name='row_idx'+uid)
mt = mt.annotate_rows(beta=tb[mt['row_idx'+uid]]['beta'])
mt = _clean_fields(mt, uid)
return mt, rg
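# Example (a sketch; `mt` is assumed to be an existing MatrixTable):
#   mt, rg = multitrait_inf(mt, h2=[0.2, 0.3, 0.4], rg=[0.1, 0.2, 0.3])
# For n traits, `rg` lists the n*(n-1)/2 upper-triangle genetic correlations, and the resulting
# row field `beta` holds an array of n effect sizes per SNP.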
@typecheck(mt=MatrixTable,
h2=oneof(list,
np.ndarray),
pi=oneof(list,
np.ndarray),
rg=oneof(float,
int),
seed=nullable(int))
def multitrait_ss(mt, h2, pi, rg=0, seed=None):
r"""Generates spike & slab betas for simulation of two correlated phenotypes.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` for simulated phenotype.
h2 : :obj:`list` or :class:`numpy.ndarray`
Desired SNP-based heritability of simulated traits.
pi : :obj:`list` or :class:`numpy.ndarray`
List of proportion of SNPs: :math:`p_{TT}`, :math:`p_{TF}`, :math:`p_{FT}`
:math:`p_{TT}` is the proportion of SNPs that are causal for both traits,
:math:`p_{TF}` is the proportion of SNPs that are causal for trait 1 but not trait 2,
:math:`p_{FT}` is the proportion of SNPs that are causal for trait 2 but not trait 1.
rg : :obj:`float` or :obj:`int`
Genetic correlation between traits.
seed : :obj:`int`, optional
Seed for random number generator. If `seed` is ``None``, `seed` is set randomly.
Warning
-------
May give inaccurate results if chosen parameters make the covariance matrix
not positive semi-definite. Covariance matrix is likely to not be positive
semi-definite when :math:`p_{TT}` is small and rg is large.
Returns
-------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with simulated SNP effects as a row field of arrays.
pi : :obj:`list` or :class:`numpy.ndarray`
List of proportion of SNPs: :math:`p_{TT}`, :math:`p_{TF}`, :math:`p_{FT}`.
Possibly altered if covariance matrix of traits was not positive semi-definite.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix was not positive semi-definite.
"""
assert sum(pi) <= 1, "probabilities of being causal must sum to be less than 1"
seed = seed if seed is not None else int(str(Env.next_seed())[:8])
ptt, ptf, pft, pff = pi[0], pi[1], pi[2], 1-sum(pi)
cov_matrix = np.asarray([[1/(ptt+ptf), rg/ptt], [rg/ptt, 1/(ptt+pft)]])
M = mt.count_rows()
# seed random state for replicability
randstate = np.random.RandomState(int(seed))
if np.any(np.linalg.eigvals(cov_matrix) < 0):
print('adjusting parameters to make covariance matrix positive semidefinite')
rg0, ptt0 = rg, ptt
while np.any(np.linalg.eigvals(cov_matrix) < 0): # check positive semidefinite
rg = round(0.99*rg, 6)
ptt = round(ptt+(pff)*0.001, 6)
cov_matrix = np.asarray(
[[1/(ptt+ptf), rg/ptt], [rg/ptt, 1/(ptt+pft)]])
pff0, pff = pff, 1-sum([ptt, ptf, pft])
print(f'rg: {rg0} -> {rg}\nptt: {ptt0} -> {ptt}\npff: {pff0} -> {pff}')
pi = [ptt, ptf, pft, pff]
beta = randstate.multivariate_normal(mean=np.zeros(2),
cov=cov_matrix,
size=[int(M), ])
zeros = np.zeros(shape=int(M)).T
beta_matrix = np.stack((beta, np.asarray([beta[:, 0], zeros]).T,
np.asarray([zeros, zeros]).T,
np.asarray([zeros, beta[:, 1]]).T), axis=1)
idx = np.random.choice(a=[0, 1, 2, 3],
size=int(M),
p=[ptt, ptf, pft, pff])
betas = beta_matrix[range(int(M)), idx, :]
betas[:, 0] *= (h2[0]/M)**(1/2)
betas[:, 1] *= (h2[1]/M)**(1/2)
    df = pd.DataFrame([0]*M, columns=['beta'])
import pandas as pd
from .video import Video
def get_videos_pages(cursor):
"""
Get the set of pages by load_video event
:param cursor:
:return:
"""
request = """
select *
from load_video
"""
cursor.execute(request)
data = cursor.fetchall()
columns_names = []
for i in cursor.description[:]:
columns_names.append(i[0])
    df = pd.DataFrame(data=data, columns=columns_names)
from PIL import Image, ImageDraw, ImageFont
import io
import numpy as np
import pandas as pd
import folium
from matplotlib.colors import LinearSegmentedColormap, rgb_to_hsv, hsv_to_rgb
import scipy.ndimage.filters
from pathlib import Path
pd.options.display.max_columns = 50
def main(dir):
# Loading Data Set
print("Loading dataset")
RentalData = pd.read_csv(dir + r'\data\processed\RentalData2015.csv', delimiter=",", encoding="utf-8")
# Changind the StartDate and EndDate to datetime format
RentalData["StartDate"] = pd.to_datetime(RentalData["StartDate"])
RentalData["EndDate"] = | pd.to_datetime(RentalData["EndDate"]) | pandas.to_datetime |
import snowflake.connector as sf
import pandas as pd
import matplotlib.pyplot as plt
from config import config
import numpy as np
# Connection String
conn = sf.connect(
user=config.username,
password=config.password,
account=config.account
)
def test_connection(connect, query):
cursor = connect.cursor()
cursor.execute(query)
cursor.close()
sql5 = """
SELECT * FROM "MI_XPRESSCLOUD"."XPRESSFEED"."SP500";
"""
df500 = pd.read_sql(sql5, conn)
sql1 = """
SELECT companyId, proId, personId FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPROFESSIONAL";
"""
sql2 = """
SELECT proId, proFunctionId FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPROTOPROFUNCTION";
"""
sql3 = """
SELECT proFunctionId, proFunctionName FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPROFUNCTION";
"""
sql4 = """
SELECT proId, compensationValue FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQCOMPENSATION";
"""
sql6 = """
SELECT personId, prefix FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPERSON";
"""
df1 = pd.read_sql(sql1, conn)
#print(df1.head())
df2 = pd.read_sql(sql2, conn)
df3 = pd.read_sql(sql3, conn)
df4 = pd.read_sql(sql4, conn)
dfperson = pd.read_sql(sql6, conn)
df5 = pd.merge(df1, df2, how='left', on='PROID')
df6 = pd.merge(df5, df3, how='left', on='PROFUNCTIONID')
#------------------------------------------------------------------------------------------------------------------------------
# By <NAME>
# (updated October 2018)
#
# Define offset vectors
# An offset vector represents the difference in gene expression profiles between two states (ex. two different conditions like
# disease vs normal)
#-------------------------------------------------------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
from keras.models import model_from_json, load_model
from keras import metrics, optimizers
from sklearn.decomposition import PCA
from functions import utils
import pickle
from numpy.random import seed
randomState = 123
seed(randomState)
def gene_space_offset(data_dir, gene_id, percent_low, percent_high):
"""
gene_space_offset(data_dir: string, gene_id: string):
input:
data_dir: directory containing the raw gene expression data for all genes including the target gene (see
gene_id definition).
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below.
In "interpolate_in_gene_space.py", after we sort samples based on the expression level of the
target gene, we want to predict the expression profile of the OTHER genes at different levels
of target gene expression.
        percent_low: float between 0 and 1
        percent_high: float between 0 and 1
computation:
offset_vector = average(gene expression of samples that have the highest percent_high% of target gene expression) -
average(gene expression of samples that have the lowest percent_low% of target gene expression)
output:
offset vector (1 x 5548 genes)
Note: offset vector does not include the target gene
"""
# Load arguments
target_gene_file = os.path.join(data_dir, gene_id + ".txt")
non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz")
# Output files
offset_file = os.path.join(data_dir, "offset_gene_space.txt")
lowest_file = os.path.join(data_dir, "lowest.txt")
highest_file = os.path.join(data_dir, "highest.txt")
# Read in data
target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0)
non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0)
# Sort target gene data by expression (lowest --> highest)
target_gene_sorted = target_gene_data.sort_values(by=[gene_id])
# Collect the extreme gene expressions
[low_ids, high_ids] = utils.get_gene_expression_above_percent(target_gene_sorted, gene_id, percent_low, percent_high)
low_exp = non_target_gene_data.loc[low_ids]
high_exp = non_target_gene_data.loc[high_ids]
print('Number of genes in low expression group is {}'.format(low_exp.shape))
print('Number of gene in high expression group is {}'.format(high_exp.shape))
# Average gene expression across samples in each extreme group
lowest_mean = low_exp.mean(axis=0)
highest_mean = high_exp.mean(axis=0)
# Generate offset using average gene expression in original dataset
offset_gene_space = highest_mean - lowest_mean
offset_gene_space_df = pd.Series.to_frame(offset_gene_space).T
# output lowest and highest expressing samples
low_exp.to_csv(lowest_file, sep='\t', float_format="%.5g")
high_exp.to_csv(highest_file, sep='\t', float_format="%.5g")
    # output gene space offset vector
offset_gene_space_df.to_csv(offset_file, sep='\t', float_format="%.5g")
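# Example call (a sketch; the directory and gene id below are illustrative placeholders):
#   gene_space_offset(data_dir='data', gene_id='PA1673', percent_low=0.05, percent_high=0.95)
# This writes offset_gene_space.txt plus the lowest/highest expression sample tables into data_dir.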
def vae_latent_space_offset(data_dir, model_dir, encoded_dir, latent_dim, gene_id, percent_low, percent_high):
"""
vae_latent_space_offset(data_dir: string, model_dir: string, encoded_dir: string, gene_id: string):
input:
data_dir: directory containing the raw gene expression data for all genes including the target gene (see
gene_id definition).
model_dir: directory containing the learned vae models
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below.
In "interpolate_in_vae_latent_space.py", after we sort samples based on the expression level of the
target gene, we want to predict the expression profile of the OTHER genes at different levels
of target gene expression.
        percent_low: float between 0 and 1
        percent_high: float between 0 and 1
computation:
offset_vector = average(encoded gene expression of samples that have the highest percent_high% of target gene expression) -
average(encoded gene expression of samples that have the lowest percent_low% of target gene expression)
output:
encoded offset vector (1 x number of latent space features)
Note: offset vector does not include the target gene
"""
# Load arguments
target_gene_file = os.path.join(data_dir, gene_id + ".txt")
non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz")
model_file = os.path.join(model_dir, "tybalt_2layer_{}latent_encoder_model.h5".format(latent_dim))
weights_file = os.path.join(model_dir, "tybalt_2layer_{}latent_encoder_weights.h5".format(latent_dim))
# Output files
offset_file = os.path.join(encoded_dir, "offset_latent_space_vae.txt")
lowest_file = os.path.join(encoded_dir, "lowest_encoded_vae.txt")
highest_file = os.path.join(encoded_dir, "highest_encoded_vae.txt")
# Read in data
target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0)
non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0)
# read in saved models
loaded_model = load_model(model_file)
# load weights into new model
loaded_model.load_weights(weights_file)
# Sort target gene data by expression (lowest --> highest)
target_gene_sorted = target_gene_data.sort_values(by=[gene_id])
# Collect the extreme gene expressions
[low_ids, high_ids] = utils.get_gene_expression_above_percent(target_gene_sorted, gene_id, percent_low, percent_high)
low_exp = non_target_gene_data.loc[low_ids]
high_exp = non_target_gene_data.loc[high_ids]
print('Number of genes in low expression group is {}'.format(low_exp.shape))
print('Number of gene in high expression group is {}'.format(high_exp.shape))
# Use trained model to encode expression data into SAME latent space
low_exp_encoded = loaded_model.predict_on_batch(low_exp)
low_exp_encoded_df = pd.DataFrame(low_exp_encoded, index=low_exp.index)
high_exp_encoded = loaded_model.predict_on_batch(high_exp)
high_exp_encoded_df = pd.DataFrame(high_exp_encoded, index=high_exp.index)
# Average gene expression across samples in each extreme group
lowest_mean = low_exp_encoded_df.mean(axis=0)
highest_mean = high_exp_encoded_df.mean(axis=0)
# Generate offset using average gene expression in original dataset
offset_latent_space = highest_mean - lowest_mean
offset_latent_space_df = pd.Series.to_frame(offset_latent_space).T
# output lowest and highest expressing samples
low_exp_encoded_df.to_csv(lowest_file, sep='\t', float_format="%.5g")
high_exp_encoded_df.to_csv(highest_file, sep='\t', float_format="%.5g")
    # output latent space offset vector
offset_latent_space_df.to_csv(offset_file, sep='\t', float_format="%.5g")
def pca_latent_space_offset(data_dir, model_dir, encoded_dir, gene_id, percent_low, percent_high):
"""
pca_latent_space_offset(data_dir: string, model_dir: string, encoded_dir: string, gene_id: string):
input:
data_dir: directory containing the raw gene expression data for all genes including the target gene (see
gene_id definition).
model_dir: directory containing the learned pca models
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below.
In "interpolate_in_pca_latent_space.py", after we sort samples based on the expression level of the
target gene, we want to predict the expression profile of the OTHER genes at different levels
of target gene expression.
        percent_low: float between 0 and 1
        percent_high: float between 0 and 1
computation:
offset_vector = average(encoded gene expression of samples that have the highest percent_high% of target gene expression) -
average(encoded gene expression of samples that have the lowest percent_low% of target gene expression)
output:
encoded offset vector (1 x number of latent space features)
Note: offset vector does not include the target gene
"""
# Load arguments
target_gene_file = os.path.join(data_dir, gene_id + ".txt")
non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz")
# Output files
offset_file = os.path.join(encoded_dir, "offset_latent_space_pca.txt")
lowest_file = os.path.join(encoded_dir, "lowest_encoded_pca.txt")
highest_file = os.path.join(encoded_dir, "highest_encoded_pca.txt")
# Read in data
target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0)
    non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0)
from sklearn.datasets import fetch_openml
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import fairlearn.metrics as fm
import fairsd as dsd
#Import dataset, training the classifier, producing y_pred
d = fetch_openml(data_id=1590, as_frame=True)
dataset = d.data
d_train = pd.get_dummies(dataset)
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
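# Example (a sketch, not executed; `series` stands for any numeric pandas Series):
#   x_edges, y_fracs = RelativeFrequencyChart().get_coordinates(series, bins=20)
# Because every observation is weighted by 1/len(series), the returned y values sum to 1.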
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
                # if the period starts before the earliest creation date, count from the earliest creation date instead
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '6M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == 'A-DEC':
                    self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
                                          & (pandas.to_datetime(frame['Created_tr']) <
                                             pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 12)))]  # 12-month window assumed, mirroring the other step-size branches
#!/usr/bin/env python3
import pandas as pd
import numpy as np
# import click #command line interface
#import tkinter for simple gui
from tkinter import filedialog, Tk
#automate the boring stuff
import time, os, sys, re, warnings, shutil
#define localfile system
if not 'nb_dir' in globals():
nb_dir = os.getcwd()
data_dir = f"{nb_dir}/data/"
output_dir = f"{data_dir}output/"
roster_dir = f"{data_dir}input/from_Student_Roster/"
zoom_dir = f"{data_dir}input/from_Zoom/"
dict_dir = 'students_email_dictionary_for_zoom_data.xlsx'
#TODO: add dialogue so ta's don't need to manually enter files and change personal settings
#TODO: check that all entries of student attendence are summed from original data (see pd.concat line)
# i.e. check that if a student leaves and then comes back, both entries are accounted for
##################################
##### local file accessability ###
##################################
def get_output_fn():
'''get save_fn from section data'''
sc_lst = list(get_sections()['SecCode'].values)
sc_lst.reverse()
assert(len(sc_lst)>0)#assert at least one section is entered
save_fn = f'attendence of {sc_lst.pop()}'
while len(sc_lst)>0:
save_fn = save_fn + f' and {sc_lst.pop()}'
save_fn = save_fn + '.xlsx'
return save_fn
def get_roster_fn(roster_dir=roster_dir):
os.chdir(roster_dir)
fn_pattern = re.compile(f'''(
[a-zA-Z0-9_%+-]+.xlsx
)''',re.VERBOSE)
roster_fn = None
for string in os.listdir():
ml = fn_pattern.match(string)
if ml is not None:
roster_fn = ml
assert(roster_fn is not None)
return roster_fn.string
def get_zoom_fn_list(zoom_dir = zoom_dir):
os.chdir(zoom_dir)
zoom_fn_list = []
for string in os.listdir():
boo = string.split('.')[-1]=='csv'
if boo:
zoom_fn_list.append(string)
return zoom_fn_list
def get_tutor_no():
return int(get_ta().size/2)
############################################
##### print current sections and ta list ###
############################################
def get_sections(
fn = "my_sections.csv"):
return pd.read_csv(data_dir+fn)
def print_sections(fn = "my_sections.csv"):
print(get_sections(fn=fn))
def add_section(sec_id, sec_code,
fn = "my_sections.csv"):
df = pd.DataFrame({'Sec ID':[sec_id],'SecCode':[sec_code]})
df = pd.concat([get_sections(fn=fn),df])
df.to_csv(fn, index=False)
return True
def get_ta(
fn = "my_ta_list.csv"):
# os.chdir(data_dir)
return pd.read_csv(fn)
def add_ta(name, email,
fn = "my_ta_list.csv"):
df = pd.DataFrame({'name':[name],'email':[email]})
df = pd.concat([get_ta(fn=fn),df])
df.to_csv(fn, index=False)
    return True
def print_ta(fn = "my_ta_list.csv"):
print(get_ta(fn=fn))
def print_state(fn_ta = "my_ta_list.csv", fn_sec = "my_sections.csv"):
    print('Your current discussion sections:')
    print_sections(fn=fn_sec)
    print('\nYour current teaching assistants:')
    print_ta(fn=fn_ta)
###################################
##### file search gui interface ###
###################################
#setup user interface for file selection
def search_for_file_path (currdir = os.getcwd()):
root = Tk()
tempdir = filedialog.askopenfilename(parent=root,initialdir=currdir, title="Please select desired file.")
root.destroy()
if len(tempdir) > 0: print ("Frames: %s" % tempdir)
return tempdir
#file update user interface for from_Student_Roster and from_Zoom
def search_for_roster():
'''move data_file_name to correct input folder'''
data_file_name = search_for_file_path()
assert(os.path.exists(data_file_name))
#move data_file_name to
shutil.move(data_file_name, roster_dir)
def search_for_zoom_file():
'''move data_file_name to correct input folder'''
data_file_name = search_for_file_path()
assert(os.path.exists(data_file_name))
shutil.move(data_file_name, zoom_dir)
###################################
##### attendance functionality ###
###################################
def get_my_students(roster_fn, my_sections = [4850, 4852, 4858],
my_tutors = ['<EMAIL>', '<EMAIL>', '<EMAIL>'],
skiprows = 14):
# nb_dir = os.getcwd()
os.chdir(roster_dir)
assert(os.path.exists(roster_fn))
df = pd.read_excel(roster_fn, skiprows=skiprows)
col = df.columns[0]
print(f"matching section ID's with the column, '{col}'.")
df['mine'] = False
for si in my_sections:
boo = df[col] == si
df.loc[boo, 'mine'] = True
df_mine = df.query('mine').copy()
email_col = df_mine.columns[-2]
print(f"email columns taken to be '{email_col}'.")
#to .txt file
save_fn = 'my_students.txt'
s_out = pd.concat([pd.Series(my_tutors),df_mine[email_col].dropna()])
s_out.to_csv(save_fn, index=False, header=False)
ns = df_mine[email_col].dropna().size
print(f"printed {ns} students to '{save_fn}'.")
os.chdir(nb_dir)
return df_mine
# return df_mine[email_col].dropna()
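# Usage sketch (section ids and arguments below are illustrative):
#   roster_df = get_my_students(get_roster_fn(), my_sections=[4850, 4852], my_tutors=list(get_ta()['email']))
# Only rows whose section id is in my_sections are kept; their emails are written to my_students.txt
# together with the tutor addresses.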
def get_attendence(df,
time_thresh = 35,
email_col = 'User Email',
name_col = 'Name (Original Name)',
time_col = 'Total Duration (Minutes)',
skiprows=2,
use_prev_dict = True):
'''
get_attendence() converts Zoom attendence data to Canvas course roster data.
attendence_dir is the relative directory holding only Zoom attendence
'.csv' files. attendence_dir is given relative to this file's directory.
get_attendence() returns a pandas dataframe of the desired attendence
data and saves it in save_fn. df is the dataframe returned by get_my_students().'''
#get file names
save_fn = get_output_fn()
attendance_dir = zoom_dir
#get tutor number
tutor_no = get_tutor_no()
#import attendence spreadsheet
print('inputted attendance files:')
att_fns = get_zoom_fn_list()
print(att_fns)
d = pd.concat([pd.read_csv(fn, skiprows=skiprows) for fn in att_fns], sort=False)
#take the max time in discussion per student
name_lst = list(set(d[name_col].values))
tmp = pd.concat([d[d[name_col]== name].sort_values(time_col,ascending=False).head(1) for name in name_lst])
d = tmp.copy()
# d = d[d.index>0]#drop the TA in charge of discussion (assumed to be at index position 0)
d.reindex()
#import dictionary and update any student emails from Zoom contained therein
os.chdir(data_dir)
try:
if os.path.exists(dict_dir) and use_prev_dict:
dse = | pd.read_excel(dict_dir) | pandas.read_excel |
import argparse
import itertools
import multiprocessing as mp
import os
from inspect import signature
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Timer import Timer, timer
import qpputils as dp
try:
from crossval import InterTopicCrossValidation, IntraTopicCrossValidation
from queries_pre_process import add_topic_to_qdf
except ModuleNotFoundError:
import sys
from pathlib import Path
script_dir = sys.path[0]
# Adding the parent directory to the path
sys.path.append(str(Path(script_dir).parent))
from crossval import InterTopicCrossValidation, IntraTopicCrossValidation
from queries_pre_process import add_topic_to_qdf
PREDICTORS = ['clarity', 'wig', 'nqc', 'smv', 'rsd', 'qf', 'uef/clarity', 'uef/wig', 'uef/nqc', 'uef/smv', 'uef/qf']
# PREDICTORS = ['clarity', 'wig', 'nqc', 'uef/clarity', 'uef/wig', 'uef/nqc']
SIMILARITY_MEASURES = ['Jac_coefficient', 'RBO_EXT_100', 'Top_10_Docs_overlap', 'RBO_FUSED_EXT_100']
parser = argparse.ArgumentParser(description='PageRank UQV Evaluation', usage='python3.7 pr_eval.py -c CORPUS')
parser.add_argument('-c', '--corpus', default='ROBUST', type=str, help='corpus (index) to work with',
choices=['ROBUST', 'ClueWeb12B'])
def calc_best_worst(full_df: pd.DataFrame, ap_df: pd.DataFrame, metric_direction):
bw_param = max if metric_direction == 'best' else min
_ap_vars = ap_df.loc[ap_df.groupby('topic')['ap'].transform(bw_param) == ap_df['ap']].set_index('topic')
_results = []
for col in full_df.set_index(['topic', 'qid']).columns:
pr_df = full_df.loc[:, ['topic', 'qid', col]]
_result = {}
for topic, _df in pr_df.groupby('topic'):
_var_ap = _ap_vars.loc[topic].qid
if type(_var_ap) is str:
_pr_val = _df.loc[_df['qid'] == _var_ap, col].values[0]
else:
_pr_val = np.mean(_df.loc[_df['qid'].isin(_var_ap), col].values)
if metric_direction == 'best':
_var_score = np.count_nonzero(_df[col] < _pr_val) / len(_df)
else:
_var_score = np.count_nonzero(_df[col] > _pr_val) / len(_df)
_result[topic] = {col: _var_score}
_results.append(pd.DataFrame.from_dict(_result, orient='index'))
df = pd.concat(_results, axis=1)
return df
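# Usage sketch (variable names are illustrative):
#   scores_df = calc_best_worst(full_results_df, ap_df, metric_direction='best')
# The result has one row per topic and one column per predictor configuration; each cell is the fraction
# of the topic's query variants that the best-AP (or worst-AP) variant outranks under that configuration.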
def set_basic_paths(corpus):
res_dir, data_dir = dp.set_environment_paths()
cv_folds = dp.ensure_file(f'{res_dir}/{corpus}/test/2_folds_30_repetitions.json')
ap_file = dp.ensure_file(f'{res_dir}/{corpus}/test/raw/QLmap1000')
pkl_dir = dp.ensure_dir(f'{res_dir}/{corpus}/test/pageRank/pkl_files')
return {'res_dir': res_dir, 'data_dir': data_dir, 'pkl_dir': pkl_dir, 'cv_folds': cv_folds, 'ap_file': ap_file}
def init_eval(corpus, similarity, predictor):
pth_dict = set_basic_paths(corpus)
predictor_pkl_dir = dp.ensure_dir(f"{pth_dict['pkl_dir']}/{predictor}")
predictions_dir = dp.ensure_dir(
f'{pth_dict["res_dir"]}/{corpus}/uqvPredictions/referenceLists/pageRank/raw/{similarity}/{predictor}/predictions')
ap_obj = dp.ResultsReader(pth_dict['ap_file'], 'ap')
ap_df = add_topic_to_qdf(ap_obj.data_df)
cv_obj = InterTopicCrossValidation(predictions_dir=predictions_dir, folds_map_file=pth_dict['cv_folds'])
full_results_df = add_topic_to_qdf(cv_obj.full_set)
return {'predictor_pkl_dir': predictor_pkl_dir, 'ap_obj': ap_obj, 'ap_df': ap_df,
'full_results_df': full_results_df, 'cv_obj': cv_obj}
@timer
def best_worst_metric(corpus, similarity, predictor, metric, load=False):
assert metric == 'best' or metric == 'worst', f'The function expects a known metric. {metric} was passed'
pkl_dir, ap_obj, ap_df, full_results_df, cv_obj = init_eval(corpus, similarity, predictor).values()
_file = f'{pkl_dir}/{similarity}_{metric}_results.pkl'
if load:
_df = load_exec(_file, calc_best_worst, (full_results_df, ap_df, metric))
else:
_df = calc_best_worst(full_results_df, ap_df, metric)
_df.to_pickle(_file)
return calc_s(cv_obj, _df)
def calc_s(cv_obj: InterTopicCrossValidation, full_scores_df: pd.DataFrame):
if hasattr(cv_obj, 'corr_df'):
cv_obj.__delattr__('corr_df')
cv_obj.full_set = full_scores_df
score = cv_obj.calc_test_results()
return float(score)
def load_exec(file_to_load, function_to_exec, args=None):
"""
The function tries to load a pandas DataFrame from a pickle file; if the file doesn't exist, it will use the function
that was passed as an argument to generate a new DataFrame.
The assumptions are that the pickle file (if it exists) holds a pandas DataFrame and
that the passed function returns a pandas DataFrame.
:param file_to_load: path to the file that should be loaded
:param function_to_exec: the function that will be executed in case the file doesn't exist
:param args: single or list of arguments to pass to the exec function
:return: pd.DataFrame
"""
try:
_df = pd.read_pickle(dp.ensure_file(file_to_load))
except AssertionError:
# Checking the signature of the function
sig = signature(function_to_exec)
if bool(sig.parameters):
if len(sig.parameters) > 1:
_df = function_to_exec(*args)
else:
_df = function_to_exec(args)
else:
_df = function_to_exec()
_df.to_pickle(file_to_load)
return _df
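# Illustrative usage of load_exec (the cache file name here is hypothetical); it mirrors the call in best_worst_metric:
# scores_df = load_exec('cache/scores.pkl', calc_best_worst, args=(full_results_df, ap_df, 'best'))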
def minmax_ap_metric(corpus, similarity, predictor, minmax):
pkl_dir, ap_obj, raw_ap_df, full_pr_df, cv_obj = init_eval(corpus, similarity, predictor).values()
_list = []
for col in full_pr_df.set_index(['topic', 'qid']).columns:
grpby = full_pr_df.loc[:, ['topic', 'qid', col]].set_index('qid').groupby('topic')[col]
_qids = grpby.idxmax() if minmax == 'max' else grpby.idxmin()
_df = raw_ap_df.loc[raw_ap_df.qid.isin(_qids)].set_index('topic')['ap']
_list.append(_df.rename(col))
full_ap_df = pd.concat(_list, axis=1)
return calc_s(cv_obj, full_ap_df) if minmax == 'max' else -calc_s(cv_obj, -full_ap_df)
def run_intra_topic_eval(corpus, similarity, predictor):
spam_time = Timer(f'working on {corpus, similarity, predictor}')
pth_dict = set_basic_paths(corpus)
# {'res_dir': res_dir, 'data_dir': data_dir, 'pkl_dir': pkl_dir, 'cv_folds': cv_folds, 'ap_file': ap_file}
predictions_dir = dp.ensure_dir(os.path.join(pth_dict['res_dir'],
f'{corpus}/uqvPredictions/referenceLists/'
f'pageRank/raw/{similarity}/{predictor}/predictions'))
cv_obj = IntraTopicCrossValidation(predictions_dir=predictions_dir, folds_map_file=pth_dict['cv_folds'],
ap_file=pth_dict['ap_file'], save_calculations=True, test='kendall')
result = cv_obj.calc_test_results()
spam_time.stop()
return {(corpus, similarity, predictor): result}
def interactive_parameters():
# TODO: cover all options for the interactive choice with retries instead of fixed values
corpus = input('What corpus should be used for evaluation?\n')
while corpus != 'ROBUST' and corpus != 'ClueWeb12B':
print(f'Unknown corpus: {corpus}\ntry ROBUST or ClueWeb12B instead')
corpus = input('What corpus should be used for evaluation?\n')
predictor = input('What predictor should be used for evaluation?\n')
while predictor not in PREDICTORS:
print(f'Unknown predictor: {predictor}\n try one of the available predictors instead, e.g.\n{PREDICTORS}')
predictor = input('What predictor should be used for evaluation?\n')
similarity = input('What similarity should be used for evaluation?\n')
while similarity not in SIMILARITY_MEASURES:
print(
f'Unknown similarity: {similarity}\n try one of the available similarities instead, e.g.\n{SIMILARITY_MEASURES}')
similarity = input('What similarity should be used for evaluation?\n')
return corpus, similarity, predictor
def run_all(metric_func):
with mp.Pool(processes=40) as pool:
results = pool.starmap(metric_func,
itertools.product({'ROBUST', 'ClueWeb12B'}, SIMILARITY_MEASURES, PREDICTORS))
df = | pd.DataFrame(results) | pandas.DataFrame |
#%%
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# load pairs
pairs = pm.Promat.get_pairs()
ipsi_pair_ids = pm.Promat.load_pairs_from_annotation('mw ipsilateral axon', pairs, return_type='all_pair_ids')
bilateral_pair_ids = pm.Promat.load_pairs_from_annotation('mw bilateral axon', pairs, return_type='all_pair_ids')
contra_pair_ids = pm.Promat.load_pairs_from_annotation('mw contralateral axon', pairs, return_type='all_pair_ids')
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
sensories_pair_ids = [pm.Promat.load_pairs_from_annotation(x, pairs, return_type='all_pair_ids') for x in pymaid.get_annotated('mw brain inputs').name]
all_sensories = [x for sublist in sensories_pair_ids for x in sublist]
# %%
# EXPERIMENT 1: removing edges from contralateral and bilateral neurons -> effect on path length?
# load previously generated paths
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 40
# excise edges and generate graphs
e_contra_contra, e_contra_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_contra, e_bi_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_ipsi, e_bi_ipsi_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'ipsilateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_all_contra, e_all_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids + contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# this chunk is incomplete
# write all graphs to graphml
# read all graphs from graphml
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
shuffled_graphs = Parallel(n_jobs=-1)(delayed(nx.readwrite.graphml.read_graphml)(f'interhemisphere/csv/shuffled_graphs/iteration-{i}.graphml', node_type=int, edge_key_type=str) for i in tqdm(range(n_init)))
shuffled_graphs = [pg.Analyze_Nx_G(edges=x.edges, graph=x) for x in shuffled_graphs]
# %%
# generate and save paths
cutoff=5
# generate and save paths for experimental
save_path = [f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra']
experimental = [e_contra_contra, e_bi_contra, e_bi_ipsi, e_all_contra]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(experimental[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=save_path[i]) for i in tqdm((range(len(experimental)))))
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_contra_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_ipsi_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_all_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths(excise_paths, control_paths, edges_removed):
excise_count = len(excise_paths)
control_counts = [len(x) for x in control_paths]
path_counts_data = []
for row in zip(control_counts, [f'control-{edges_removed}']*len(control_counts)):
path_counts_data.append(row)
path_counts_data.append([excise_count, f'excised-{edges_removed}'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}.csv')
# count per # hops
excise_path_counts = [len(x) for x in excise_paths]
control_path_counts = [[len(x) for x in path] for path in control_paths]
path_counts_length_data = []
for i, path_length in enumerate(control_path_counts):
for row in zip(path_length, [f'control-{edges_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for row in zip(excise_path_counts, [f'excised-{edges_removed}']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = | pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N']) | pandas.DataFrame |
import sys
import os
import math
import datetime
import itertools
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests
import scipy.stats as stats
from mesa.batchrunner import BatchRunner, BatchRunnerMP
from mesa.datacollection import DataCollector
from project_material.model.network import HostNetwork
class CustomBatchRunner(BatchRunner):
def run_model(self, model):
while model.schedule.steps < self.max_steps:
model.step()
def track_params(model):
return (
model.num_nodes,
model.avg_node_degree,
model.initial_outbreak_size,
model.prob_spread_virus_gamma_shape,
model.prob_spread_virus_gamma_scale,
model.prob_spread_virus_gamma_loc,
model.prob_spread_virus_gamma_magnitude_multiplier,
model.prob_recover_gamma_shape,
model.prob_recover_gamma_scale,
model.prob_recover_gamma_loc,
model.prob_recover_gamma_magnitude_multiplier,
model.prob_virus_kill_host_gamma_shape,
model.prob_virus_kill_host_gamma_scale,
model.prob_virus_kill_host_gamma_loc,
model.prob_virus_kill_host_gamma_magnitude_multiplier,
model.prob_infectious_no_to_mild_symptom_gamma_shape,
model.prob_infectious_no_to_mild_symptom_gamma_scale,
model.prob_infectious_no_to_mild_symptom_gamma_loc,
model.prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_no_to_severe_symptom_gamma_shape,
model.prob_infectious_no_to_severe_symptom_gamma_scale,
model.prob_infectious_no_to_severe_symptom_gamma_loc,
model.prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_infectious_no_to_critical_symptom_gamma_shape,
model.prob_infectious_no_to_critical_symptom_gamma_scale,
model.prob_infectious_no_to_critical_symptom_gamma_loc,
model.prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_no_symptom_gamma_shape,
model.prob_infectious_mild_to_no_symptom_gamma_scale,
model.prob_infectious_mild_to_no_symptom_gamma_loc,
model.prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_severe_symptom_gamma_shape,
model.prob_infectious_mild_to_severe_symptom_gamma_scale,
model.prob_infectious_mild_to_severe_symptom_gamma_loc,
model.prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_critical_symptom_gamma_shape,
model.prob_infectious_mild_to_critical_symptom_gamma_scale,
model.prob_infectious_mild_to_critical_symptom_gamma_loc,
model.prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_no_symptom_gamma_shape,
model.prob_infectious_severe_to_no_symptom_gamma_scale,
model.prob_infectious_severe_to_no_symptom_gamma_loc,
model.prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_mild_symptom_gamma_shape,
model.prob_infectious_severe_to_mild_symptom_gamma_scale,
model.prob_infectious_severe_to_mild_symptom_gamma_loc,
model.prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_critical_symptom_gamma_shape,
model.prob_infectious_severe_to_critical_symptom_gamma_scale,
model.prob_infectious_severe_to_critical_symptom_gamma_loc,
model.prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_no_symptom_gamma_shape,
model.prob_infectious_critical_to_no_symptom_gamma_scale,
model.prob_infectious_critical_to_no_symptom_gamma_loc,
model.prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_mild_symptom_gamma_shape,
model.prob_infectious_critical_to_mild_symptom_gamma_scale,
model.prob_infectious_critical_to_mild_symptom_gamma_loc,
model.prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_severe_symptom_gamma_shape,
model.prob_infectious_critical_to_severe_symptom_gamma_scale,
model.prob_infectious_critical_to_severe_symptom_gamma_loc,
model.prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_recovered_no_to_mild_complication,
model.prob_recovered_no_to_severe_complication,
model.prob_recovered_mild_to_no_complication,
model.prob_recovered_mild_to_severe_complication,
model.prob_recovered_severe_to_no_complication,
model.prob_recovered_severe_to_mild_complication,
model.prob_gain_immunity,
model.hospital_bed_capacity_as_percent_of_population,
model.hospital_bed_cost_per_day,
model.icu_bed_capacity_as_percent_of_population,
model.icu_bed_cost_per_day,
model.ventilator_capacity_as_percent_of_population,
model.ventilator_cost_per_day,
model.drugX_capacity_as_percent_of_population,
model.drugX_cost_per_day,
)
def track_run(model):
return model.uid
class BatchHostNetwork(HostNetwork):
# id generator to track run number in batch run data
id_gen = itertools.count(1)
def __init__(self, num_nodes, avg_node_degree, initial_outbreak_size,
prob_spread_virus_gamma_shape,
prob_spread_virus_gamma_scale,
prob_spread_virus_gamma_loc,
prob_spread_virus_gamma_magnitude_multiplier,
prob_recover_gamma_shape,
prob_recover_gamma_scale,
prob_recover_gamma_loc,
prob_recover_gamma_magnitude_multiplier,
prob_virus_kill_host_gamma_shape,
prob_virus_kill_host_gamma_scale,
prob_virus_kill_host_gamma_loc,
prob_virus_kill_host_gamma_magnitude_multiplier,
prob_infectious_no_to_mild_symptom_gamma_shape,
prob_infectious_no_to_mild_symptom_gamma_scale,
prob_infectious_no_to_mild_symptom_gamma_loc,
prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_severe_symptom_gamma_shape,
prob_infectious_no_to_severe_symptom_gamma_scale,
prob_infectious_no_to_severe_symptom_gamma_loc,
prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_critical_symptom_gamma_shape,
prob_infectious_no_to_critical_symptom_gamma_scale,
prob_infectious_no_to_critical_symptom_gamma_loc,
prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_no_symptom_gamma_shape,
prob_infectious_mild_to_no_symptom_gamma_scale,
prob_infectious_mild_to_no_symptom_gamma_loc,
prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_severe_symptom_gamma_shape,
prob_infectious_mild_to_severe_symptom_gamma_scale,
prob_infectious_mild_to_severe_symptom_gamma_loc,
prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_critical_symptom_gamma_shape,
prob_infectious_mild_to_critical_symptom_gamma_scale,
prob_infectious_mild_to_critical_symptom_gamma_loc,
prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_no_symptom_gamma_shape,
prob_infectious_severe_to_no_symptom_gamma_scale,
prob_infectious_severe_to_no_symptom_gamma_loc,
prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_mild_symptom_gamma_shape,
prob_infectious_severe_to_mild_symptom_gamma_scale,
prob_infectious_severe_to_mild_symptom_gamma_loc,
prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_critical_symptom_gamma_shape,
prob_infectious_severe_to_critical_symptom_gamma_scale,
prob_infectious_severe_to_critical_symptom_gamma_loc,
prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_no_symptom_gamma_shape,
prob_infectious_critical_to_no_symptom_gamma_scale,
prob_infectious_critical_to_no_symptom_gamma_loc,
prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_mild_symptom_gamma_shape,
prob_infectious_critical_to_mild_symptom_gamma_scale,
prob_infectious_critical_to_mild_symptom_gamma_loc,
prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_severe_symptom_gamma_shape,
prob_infectious_critical_to_severe_symptom_gamma_scale,
prob_infectious_critical_to_severe_symptom_gamma_loc,
prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
prob_recovered_no_to_mild_complication,
prob_recovered_no_to_severe_complication,
prob_recovered_mild_to_no_complication,
prob_recovered_mild_to_severe_complication,
prob_recovered_severe_to_no_complication,
prob_recovered_severe_to_mild_complication,
prob_gain_immunity,
hospital_bed_capacity_as_percent_of_population,
hospital_bed_cost_per_day,
icu_bed_capacity_as_percent_of_population,
icu_bed_cost_per_day,
ventilator_capacity_as_percent_of_population,
ventilator_cost_per_day,
drugX_capacity_as_percent_of_population,
drugX_cost_per_day,
):
super().__init__(
num_nodes, avg_node_degree, initial_outbreak_size,
prob_spread_virus_gamma_shape,
prob_spread_virus_gamma_scale,
prob_spread_virus_gamma_loc,
prob_spread_virus_gamma_magnitude_multiplier,
prob_recover_gamma_shape,
prob_recover_gamma_scale,
prob_recover_gamma_loc,
prob_recover_gamma_magnitude_multiplier,
prob_virus_kill_host_gamma_shape,
prob_virus_kill_host_gamma_scale,
prob_virus_kill_host_gamma_loc,
prob_virus_kill_host_gamma_magnitude_multiplier,
prob_infectious_no_to_mild_symptom_gamma_shape,
prob_infectious_no_to_mild_symptom_gamma_scale,
prob_infectious_no_to_mild_symptom_gamma_loc,
prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_severe_symptom_gamma_shape,
prob_infectious_no_to_severe_symptom_gamma_scale,
prob_infectious_no_to_severe_symptom_gamma_loc,
prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_critical_symptom_gamma_shape,
prob_infectious_no_to_critical_symptom_gamma_scale,
prob_infectious_no_to_critical_symptom_gamma_loc,
prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_no_symptom_gamma_shape,
prob_infectious_mild_to_no_symptom_gamma_scale,
prob_infectious_mild_to_no_symptom_gamma_loc,
prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_severe_symptom_gamma_shape,
prob_infectious_mild_to_severe_symptom_gamma_scale,
prob_infectious_mild_to_severe_symptom_gamma_loc,
prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_critical_symptom_gamma_shape,
prob_infectious_mild_to_critical_symptom_gamma_scale,
prob_infectious_mild_to_critical_symptom_gamma_loc,
prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_no_symptom_gamma_shape,
prob_infectious_severe_to_no_symptom_gamma_scale,
prob_infectious_severe_to_no_symptom_gamma_loc,
prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_mild_symptom_gamma_shape,
prob_infectious_severe_to_mild_symptom_gamma_scale,
prob_infectious_severe_to_mild_symptom_gamma_loc,
prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_critical_symptom_gamma_shape,
prob_infectious_severe_to_critical_symptom_gamma_scale,
prob_infectious_severe_to_critical_symptom_gamma_loc,
prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_no_symptom_gamma_shape,
prob_infectious_critical_to_no_symptom_gamma_scale,
prob_infectious_critical_to_no_symptom_gamma_loc,
prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_mild_symptom_gamma_shape,
prob_infectious_critical_to_mild_symptom_gamma_scale,
prob_infectious_critical_to_mild_symptom_gamma_loc,
prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_severe_symptom_gamma_shape,
prob_infectious_critical_to_severe_symptom_gamma_scale,
prob_infectious_critical_to_severe_symptom_gamma_loc,
prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
prob_recovered_no_to_mild_complication,
prob_recovered_no_to_severe_complication,
prob_recovered_mild_to_no_complication,
prob_recovered_mild_to_severe_complication,
prob_recovered_severe_to_no_complication,
prob_recovered_severe_to_mild_complication,
prob_gain_immunity,
hospital_bed_capacity_as_percent_of_population,
hospital_bed_cost_per_day,
icu_bed_capacity_as_percent_of_population,
icu_bed_cost_per_day,
ventilator_capacity_as_percent_of_population,
ventilator_cost_per_day,
drugX_capacity_as_percent_of_population,
drugX_cost_per_day,
)
self.model_reporters_dict.update({'Model params': track_params, 'Run': track_run})
self.datacollector = DataCollector(model_reporters=self.model_reporters_dict)
# parameter lists for each parameter to be tested in batch run
br_params = {
'num_nodes': [500],
'avg_node_degree': [10],
'initial_outbreak_size': [2],
'prob_spread_virus_gamma_shape': [1],
'prob_spread_virus_gamma_scale': [3],
'prob_spread_virus_gamma_loc': [0],
'prob_spread_virus_gamma_magnitude_multiplier': [0.25],
'prob_recover_gamma_shape': [7],
'prob_recover_gamma_scale': [3],
'prob_recover_gamma_loc': [0],
'prob_recover_gamma_magnitude_multiplier': [0.75],
'prob_virus_kill_host_gamma_shape': [5.2],
'prob_virus_kill_host_gamma_scale': [3.2],
'prob_virus_kill_host_gamma_loc': [0],
'prob_virus_kill_host_gamma_magnitude_multiplier': [0.069],
'prob_infectious_no_to_mild_symptom_gamma_shape': [4.1],
'prob_infectious_no_to_mild_symptom_gamma_scale': [1],
'prob_infectious_no_to_mild_symptom_gamma_loc': [0],
'prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier': [0.75],
'prob_infectious_no_to_severe_symptom_gamma_shape': [1],
'prob_infectious_no_to_severe_symptom_gamma_scale': [2],
'prob_infectious_no_to_severe_symptom_gamma_loc': [0],
'prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier': [0.1],
'prob_infectious_no_to_critical_symptom_gamma_shape': [1],
'prob_infectious_no_to_critical_symptom_gamma_scale': [2.8],
'prob_infectious_no_to_critical_symptom_gamma_loc': [0],
'prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier': [0.15],
'prob_infectious_mild_to_no_symptom_gamma_shape': [3],
'prob_infectious_mild_to_no_symptom_gamma_scale': [3],
'prob_infectious_mild_to_no_symptom_gamma_loc': [0],
'prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier': [0.25],
'prob_infectious_mild_to_severe_symptom_gamma_shape': [4.9],
'prob_infectious_mild_to_severe_symptom_gamma_scale': [2.2],
'prob_infectious_mild_to_severe_symptom_gamma_loc': [0],
'prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier': [0.11],
'prob_infectious_mild_to_critical_symptom_gamma_shape': [3.3],
'prob_infectious_mild_to_critical_symptom_gamma_scale': [3.1],
'prob_infectious_mild_to_critical_symptom_gamma_loc': [0],
'prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier': [0.11],
'prob_infectious_severe_to_no_symptom_gamma_shape': [3],
'prob_infectious_severe_to_no_symptom_gamma_scale': [2],
'prob_infectious_severe_to_no_symptom_gamma_loc': [0],
'prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_severe_to_mild_symptom_gamma_shape': [5],
'prob_infectious_severe_to_mild_symptom_gamma_scale': [3],
'prob_infectious_severe_to_mild_symptom_gamma_loc': [0],
'prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_severe_to_critical_symptom_gamma_shape': [7],
'prob_infectious_severe_to_critical_symptom_gamma_scale': [3],
'prob_infectious_severe_to_critical_symptom_gamma_loc': [0],
'prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier': [0.01],
'prob_infectious_critical_to_no_symptom_gamma_shape': [7],
'prob_infectious_critical_to_no_symptom_gamma_scale': [1],
'prob_infectious_critical_to_no_symptom_gamma_loc': [0],
'prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_critical_to_mild_symptom_gamma_shape': [4],
'prob_infectious_critical_to_mild_symptom_gamma_scale': [2],
'prob_infectious_critical_to_mild_symptom_gamma_loc': [0],
'prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_critical_to_severe_symptom_gamma_shape': [5],
'prob_infectious_critical_to_severe_symptom_gamma_scale': [2],
'prob_infectious_critical_to_severe_symptom_gamma_loc': [0],
'prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier': [0.25],
'prob_recovered_no_to_mild_complication': [0.016],
'prob_recovered_no_to_severe_complication': [0],
'prob_recovered_mild_to_no_complication': [0.02],
'prob_recovered_mild_to_severe_complication': [0.02],
'prob_recovered_severe_to_no_complication': [0.001],
'prob_recovered_severe_to_mild_complication': [0.001],
'prob_gain_immunity': [0.005],
'hospital_bed_capacity_as_percent_of_population': [0.10],
'hospital_bed_cost_per_day': [2000],
'icu_bed_capacity_as_percent_of_population': [0.10],
'icu_bed_cost_per_day': [3000],
'ventilator_capacity_as_percent_of_population': [0.1],
'ventilator_cost_per_day': [100],
'drugX_capacity_as_percent_of_population': [0.1],
'drugX_cost_per_day': [20],
}
start_date = datetime.datetime(2020, 2, 20) # Setting
num_iterations = 1 # Setting
num_max_steps_in_reality = 95 # Setting
num_max_steps_in_simulation = 165 # Setting
end_date_in_reality = start_date + datetime.timedelta(days=num_max_steps_in_reality) # 2020-05-25
end_date_in_simulation = start_date + datetime.timedelta(days=num_max_steps_in_simulation) # 2020-09-22 if num_max_steps_in_simulation == 215
try:
br = BatchRunnerMP(BatchHostNetwork,
br_params,
iterations=num_iterations,
max_steps=num_max_steps_in_simulation,
model_reporters={'Data Collector': lambda m: m.datacollector})
except Exception as e:
print('Multiprocessing batch run not applied, reason as:', e)
br = CustomBatchRunner(BatchHostNetwork,
br_params,
iterations=num_iterations,
max_steps=num_max_steps_in_simulation,
model_reporters={'Data Collector': lambda m: m.datacollector})
def main(on_switch=False, graph_switch=False, stats_test_switch=False, save_switch=False,
realworld_prediction_switch=False, filename_tag=''):
if on_switch:
br.run_all()
br_df = br.get_model_vars_dataframe()
br_step_data = pd.DataFrame()
for i in range(len(br_df['Data Collector'])):
if isinstance(br_df['Data Collector'][i], DataCollector):
print('>>>>> Run #{}'.format(i))
i_run_data = br_df['Data Collector'][i].get_model_vars_dataframe()
i_run_data['Date'] = i_run_data.apply(lambda row: convert_time_to_date(row, 'Time', start_date), axis=1)
br_step_data = br_step_data.append(i_run_data, ignore_index=True)
model_param = i_run_data['Model params'][0]
df_real = prepare_realworld_data().copy()
df_real['date_formatted'] = pd.to_datetime(df_real['date_formatted'])
df_real.sort_values(by=['date_formatted'])
df_sim = i_run_data.copy()
df_sim['Date'] = pd.to_datetime(df_sim['Date'])
df_sim.sort_values(by=['Date'])
df_merged = pd.merge(df_real, df_sim, how='outer', left_on=['date_formatted'],
right_on=['Date'])
if graph_switch:
print('>> For graphs')
print('Model param:', model_param)
graphing(df=df_merged)
if stats_test_switch:
print('>> For statistical tests')
print('Model param:', model_param)
df_merged_sliced = df_merged[(df_merged['date_formatted'] >= start_date)
& (df_merged['date_formatted'] <= end_date_in_reality)]
statistical_test_validation(df=df_merged_sliced)
if realworld_prediction_switch:
print('>> For real-world predictions')
print('Model param:', model_param)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='cumulative_cases',
feature_col='Cumulative test-confirmed infectious'
)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='cumulative_deaths',
feature_col='Cumulative test-confirmed dead'
)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='active_cases',
feature_col='Test-confirmed infectious'
)
br_step_data['File ID'] = filename_tag
if save_switch:
br_step_data.to_csv(os.getcwd() +
'\\project_result\\disease_model_step_data{}_p{}.csv'.format(filename_tag, i),
index=False)
df_merged.to_csv(os.getcwd() +
'\\project_result\\disease_model_merged_data{}_p{}.csv'.format(filename_tag, i),
index=False)
# Helper functions
curr_dir = os.getcwd()
covid19_dir = '\\data\\Covid19Canada'
covid19_timeseries_prov_dir = covid19_dir+'\\timeseries_prov'
cases_timeseries_filename = 'cases_timeseries_prov.csv'
mortality_timeseries_filename = 'mortality_timeseries_prov.csv'
overall_timeseries_filename = 'active_timeseries_prov.csv'
testing_timeseries_filename = 'testing_timeseries_prov.csv'
project_result_dir = '\\project_result'
output_real_data_filename = 'realworldCovid19_step_data_processed.csv'
popn_factor = 1000000 # Setting
def convert_time_to_date(row, var, start_date):
current_date = start_date + datetime.timedelta(days=(int(row[var]-1)))
return current_date
def get_realworld_data():
path_overall = curr_dir+covid19_timeseries_prov_dir+'\\'+overall_timeseries_filename
path_testing = curr_dir+covid19_timeseries_prov_dir+'\\'+testing_timeseries_filename
df_overall = pd.read_csv(path_overall, encoding='utf-8', low_memory=False)
df_overall.rename(columns={'date_active': 'date'}, inplace=True)
df_testing = pd.read_csv(path_testing, encoding='utf-8', low_memory=False)
df_testing.rename(columns={'date_testing': 'date'}, inplace=True)
df_merged = | pd.merge(df_overall, df_testing, on=['province', 'date'], how='outer') | pandas.merge |
import pandas as pd
import numpy as np
import datetime
name = ['IP', 'app', 'daytime', 'platform', 'channel_type', 'channel', 'user_id',
'device_id', 'system_version', 'brand', 'model', 'version', 'event_id', 'para']
# If the file is not a (comma-separated by default) csv, you need to pass sep to specify the delimiter, otherwise '\t' gets split into the values; set header=None, otherwise the first row of data is used as the column names by default
f1 = pd.DataFrame(pd.read_csv('/Users/yuanfang/Desktop/download/logs/1/2/3/2/2019/3/26/2/logs12405.log',
sep='\t', header=None, names=name))
f2 = pd.DataFrame(pd.read_csv('/Users/yuanfang/Desktop/download/logs/1/2/3/2/2019/3/27/2/logs12405.log',
sep='\t', header=None, names=name))
# Then rename the default column names
# Reading big data in batches: DataFrame itself has no such method
# You can read in batches by setting chunksize, or set iterator=True and then pick any number of rows with get_chunk, e.g. the sketch below
# chunk = df.get_chunk(2)
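# A minimal sketch of chunked reading (the path and chunk sizes here are illustrative, not from the original script):
# reader = pd.read_csv('/path/to/big_log.log', sep='\t', header=None, names=name, iterator=True)
# first_rows = reader.get_chunk(1000)  # pull the next 1000 rows on demand
# for part in pd.read_csv('/path/to/big_log.log', sep='\t', header=None, names=name, chunksize=100000):
#     pass  # process each 100k-row chunk here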
# Show all columns when printing
# pd.set_option('display.max_columns', None)
# print(a.shape)
# Group by a given column and count the corresponding records
# question1: 1. Cumulative registered users: the cumulative number of registered users up to a given point in time (registration event event_id=1)
f2['daytime'] = pd.to_datetime(f2['daytime']).dt.normalize() # drop the hour/minute/second, keeping only the date
register = f2[(f2['daytime'] == '2019-03-27')].event_id.count()
print('Registered users on the day: %s' % register)
# question2: 2. DAU: the number of (deduplicated) users who launched the app in the past day; a user who launched at least once counts as active, including both new and existing users
# Append the rows of df2 to the end of df1; ignore_index continues the row numbering from the data above
# a = df.append(df2, ignore_index=True)  # or: a = pd.concat([df, df2], ignore_index=True)
# start = a.groupby(['datetime'])['event_id'].count()
# print(a)
# The drop_duplicates method works on DataFrame-format data, removing rows that are duplicated in the given column(s), and returns a DataFrame.
start = f1.drop_duplicates('user_id', 'first')['event_id'].count()
print('Users who launched the app on the day: %s' % start)
# question3: 3. Next-day retention: (users among the day's new users who use the app again the next day) / (the day's new users) * 100% -- see the sketch below
# a = pd.isnull(res)
# b = res[a==False].reset_index(drop=True)
# Select the users' records from before May
# d_new = res[(res['datetime'] < '2019-05-01')].sort_values(by='datetime')
# d_all = pd.merge(d_new,res,how='left',on='event_id')
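# A minimal sketch of next-day retention, assuming f1 holds the day-1 (2019-03-26) new users and f2 the day-2 activity:
# new_users = set(f1['user_id'])                      # users who first appear on day 1
# retained = new_users & set(f2['user_id'])           # of those, users active again on day 2
# retention_rate = len(retained) / len(new_users) * 100 if new_users else 0.0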
df = pd.concat([f1, f2], ignore_index=True) # concatenate the data
df['daytime'] = | pd.to_datetime(df['daytime']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
| assert_frame_equal(result, df) | pandas.util.testing.assert_frame_equal |
from cgitb import enable
import os
import json
from lightgbm import early_stopping
from tabulate import tabulate
from functools import partial
from IPython.display import display
from tqdm.auto import tqdm
import numpy as np
import xgboost as xgb
from .logger import logger
from sklearn.preprocessing import LabelEncoder, LabelBinarizer, OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from src.params import get_params
from .cfg import CFG
from .models import fetch_model, Metrics
import pandas as pd
import optuna
import joblib
from wandb.xgboost import wandb_callback
from wandb.lightgbm import wandb_callback, log_summary
optuna.logging.set_verbosity(optuna.logging.INFO)
def reduce_mem_usage(df):
"""
Iterate through all the columns of a dataframe and modify their data types to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
logger.info('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
logger.info('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
logger.info('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
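# Illustrative usage (the CSV path is hypothetical):
# df = pd.read_csv('train.csv')
# df = reduce_mem_usage(df)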
def null_checker(data):
for col in data.columns:
if data[col].isnull().sum() > 0:
display(f"null col: {col} >> total null : {data[col].isnull().sum()}")
# if data[col].dtype == "object":
data[col] = data[col].fillna(method='ffill')
# else:
# data[col] = data[col].fillna(data[col].mean())
display(f"after fillup null: {col} >> total null : {data[col].isnull().sum()}")
def label_encode(df, target, model_config):
pass
# if is_train:
# df[target] = lbl_encoder.transform(df[target])
def categorical_data(df, model):
pass
def normal_data_split(df, label, random_state, shuffle, test_size):
X = df.drop(label, axis=1)
y = df[label]
xtrain, xtest, ytrain, ytest = train_test_split(
X, y,
random_state=random_state,
shuffle=shuffle,
test_size=test_size
)
return xtrain, xtest, ytrain, ytest
# Function for mean value of metrics
def dict_mean(dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
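# Illustrative usage: averaging per-fold metric dictionaries (the numbers are made up):
# fold_metrics = [{'auc': 0.91, 'logloss': 0.31}, {'auc': 0.89, 'logloss': 0.33}]
# dict_mean(fold_metrics)  # -> approximately {'auc': 0.90, 'logloss': 0.32}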
def kaggle_submission(model_config, final_pred):
ans = np.mean(final_pred, axis = 0)
# print(ans.shape)
sub_df = pd.read_csv(model_config["submission_path"])
sub_df[model_config["label"]] = ans
sub_df.to_csv(f'{os.path.join(model_config["output_path"], model_config["folder_output"])}/submission.csv', index=False)
logger.info(">>> Submission For Kaggle Is Prepared. Please Cross-check")
def submission_test(model_config, sub_pred_values):
# sub_pred_ = np.mean(sub_pred_values, axis=0)
# print(sub_pred_.shape)
d = {
'label': sub_pred_values
}
df = | pd.DataFrame.from_dict(data=d, orient='index') | pandas.DataFrame.from_dict |
import numpy as np
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal
# Concatenate the data
data1 = pd.read_csv("transacciones2008.txt",sep = ";",names=['Fecha','Hora','Conversion','Monto'],decimal =",")
data2 = pd.read_csv("transacciones2009.txt",sep = ";",names=['Fecha','Hora','Conversion','Monto'],decimal =",")
data3 = pd.read_csv("transacciones2010.txt",sep = ";",names=['Fecha','Hora','Conversion','Monto'],decimal =",")
a = data1["Fecha"].str.split(" ",expand = True)
b = data1["Hora"].str.split(" ",expand = True)
c = data2["Fecha"].str.split(" ",expand = True)
d = data2["Hora"].str.split(" ",expand = True)
e = data3["Fecha"].str.split(" ",expand = True)
f = data3["Hora"].str.split(" ",expand = True)
n1 = pd.DataFrame({'Fecha': a[0] + " " + b[1],'Conversion':data1["Conversion"],'Monto':data1["Monto"]})
n2 = pd.DataFrame({'Fecha': c[0] + " " + d[1],'Conversion':data2["Conversion"],'Monto':data2["Monto"]})
n3 = pd.DataFrame({'Fecha': e[0] + " " + f[1],'Conversion':data3["Conversion"],'Monto':data3["Monto"]})
data = pd.concat([n1,n2,n3],ignore_index = True)
data["Fecha"] = | pd.to_datetime(data["Fecha"],format='%d/%m/%Y %H:%M:%S') | pandas.to_datetime |
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas_extras import (
concatenate_columns, expand_list, expand_lists,
extract_dict_key, extract_dictionary, merge_columns,
)
class TransformationsTestCase(unittest.TestCase):
def test_expand_list_pos_01(self):
df = pd.DataFrame(
{
'test_index': [1, 2, 3, 4, 5, 6],
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[1],
[],
None,
]
}
).set_index('test_index')
expected = pd.DataFrame(
{
'newcol': [1, 2, 3, 4, 1, 2, 3, 1, 2, 1, None, None],
'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3],
'test_index': [1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6]
}
)
assert_frame_equal(expand_list(df, 'samples', 'newcol').reset_index(),
expected, check_like=True, check_dtype=False)
def test_expand_list_pos_02(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[1],
[],
None,
]
}
).set_index(['trial_num', 'subject'])
expected = pd.DataFrame(
{
'samples': [1, 2, 3, 4, 1, 1, 2, 3, None, 1, 2, None],
'subject': [1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2],
'trial_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
}
)
assert_frame_equal(expand_list(df, 'samples').reset_index(), expected, check_like=True)
def test_expand_list_pos_03(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[1],
[],
np.NaN,
]
}
).set_index(['trial_num', 'subject'])
expected = pd.DataFrame(
{
'samples': [1, 2, 3, 4, 1, 1, 2, 3, None, 1, 2, None],
'subject': [1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2],
'trial_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
}
)
assert_frame_equal(expand_list(df, 'samples').reset_index(), expected, check_like=True)
def test_expand_lists_pos_01(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[1],
[],
None,
],
'samples2': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[1],
[],
None,
]
}
)
expected = pd.DataFrame(
{
'newcol': [1, 2, 3, 4, 1, 2, 3, 1, 2, 1, None, None],
'newcol2': [1, 2, 3, 4, 1, 2, 3, 1, 2, 1, None, None],
'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3]
}
)
assert_frame_equal(
expand_lists(df, ['samples', 'samples2'], ['newcol', 'newcol2']).reset_index().drop('index', axis=1),
expected,
check_like=True
)
def test_expand_lists_pos_02(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1],
[1],
[],
None,
],
'samples2': [
[1, 2],
[3],
[1, 2],
[1],
[],
None,
]
}
).set_index(['trial_num', 'subject'])
expected = pd.DataFrame(
{
'samples': [1, 2, 3, 4, 1, 1, 2, 3, None, 1, None, None],
'samples2': [1, 2, None, None, 1, 3, None, None, None, 1, 2, None],
'subject': [1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2],
'trial_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
}
)
assert_frame_equal(expand_lists(df, ['samples', 'samples2']).reset_index(), expected, check_like=True)
def test_expand_lists_pos_03(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[{'testkey': 1}, {'testkey': 2}, {'testkey': 3}, {'testkey': 4}],
[{'testkey': 1}, {'testkey': 2}, {'testkey': 3}],
[{'testkey': 1}, {'testkey': 2}],
[{'testkey': 1}],
[],
None,
],
'other_samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[1],
[],
None,
]
}
)
expected = pd.DataFrame(
{
'newcol': [{'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 4.0},
{'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 1.0},
{'testkey': 2.0}, {'testkey': 1.0}, None, None],
'newcol2': [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 1.0, 2.0, 1.0, None, None],
'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3]
}
)
assert_frame_equal(
expand_lists(df, ['samples', 'other_samples'], ['newcol', 'newcol2']).reset_index(drop=True),
expected,
check_like=True
)
def test_expand_lists_pos_04(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 3, 1, 2, 3],
'subject': [1, 1, 1, 2, 2, 2],
'samples': [
[{'testkey': 1}, {'testkey': 2}, {'testkey': 3}, {'testkey': 4}],
[{'testkey': 1}, {'testkey': 2}, {'testkey': 3}],
[{'testkey': 1}, {'testkey': 2}],
[{'testkey': 1}],
[],
['this will be NaN, as None is not iterable'],
],
'other_samples': [
[1, 2, 3, 4],
[1, 2, 3],
[1, 2],
[],
[1],
None,
]
}
)
expected = pd.DataFrame(
{
'newcol': [{'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 4.0},
{'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 1.0},
{'testkey': 2.0}, {'testkey': 1.0}, None, None],
'newcol2': [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 1.0, 2.0, None, 1.0, None],
'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3]
}
)
assert_frame_equal(
expand_lists(df, ['samples', 'other_samples'], ['newcol', 'newcol2']).reset_index(drop=True),
expected,
check_like=True
)
def test_extract_dict_key_pos_01(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
None,
]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
None,
],
'samples.A': [1, 3, 6, None]
}
)
assert_frame_equal(extract_dict_key(df, 'samples', 'A').reset_index(drop=True), expected, check_like=True)
def test_extract_dict_key_pos_02(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
{'B': 8, 'C': None},
]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
{'B': 8, 'C': None},
],
'newcol': [1, 3, 6, None]
}
)
assert_frame_equal(
extract_dict_key(df, 'samples', 'A', 'newcol').reset_index(drop=True),
expected, check_like=True
)
def test_extract_dict_key_pos_03(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [np.NaN, np.NaN, np.NaN, np.NaN]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [np.NaN, np.NaN, np.NaN, np.NaN],
'newcol': [np.NaN, np.NaN, np.NaN, np.NaN]
}
)
assert_frame_equal(
extract_dict_key(df, 'samples', 'A', 'newcol').reset_index(drop=True),
expected, check_like=True
)
def test_extract_dict_key_pos_04(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
}
)
with self.assertRaises(KeyError):
extract_dict_key(df, 'samples', 'A', 'newcol')
def test_extract_dict_key_pos_05(self):
df = pd.DataFrame(
columns=('trial_num', 'subject', 'samples')
)
self.assertIn('newcol', extract_dict_key(df, 'samples', 'A', 'newcol').columns.to_list())
def test_extract_dictionary_pos_01(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
None,
]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples.A': [1, 3, 6, None],
'samples.B': [2, 4, 7, None],
}
)
assert_frame_equal(
extract_dictionary(df, 'samples', ['A', 'B']).reset_index(drop=True),
expected, check_like=True
)
def test_extract_dictionary_pos_02(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
None,
]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'newcol.A': [1, 3, 6, None],
'newcol.B': [2, 4, 7, None],
}
)
assert_frame_equal(
extract_dictionary(df, 'samples', ['A', 'B'], 'newcol').reset_index(drop=True),
expected, check_like=True
)
def test_extract_dictionary_pos_03(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
None,
]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples.A': [1, 3, 6, None],
'samples.B': [2, 4, 7, None],
'samples.C': [None, 5, None, None]
}
)
assert_frame_equal(extract_dictionary(df, 'samples').reset_index(drop=True), expected, check_like=True)
def test_extract_dictionary_pos_04(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
None,
]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'A': [1, 3, 6, None],
'B': [2, 4, 7, None],
'C': [None, 5, None, None]
}
)
assert_frame_equal(
extract_dictionary(df, 'samples', prefix='').reset_index(drop=True),
expected, check_like=True
)
def test_extract_dictionary_pos_05(self):
df = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [None, None, None, None]
}
)
expected = pd.DataFrame(
{
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2]
}
)
assert_frame_equal(
extract_dictionary(df, 'samples', prefix='').reset_index(drop=True),
expected, check_like=True
)
def test_extract_dictionary_pos_06(self):
df = pd.DataFrame({
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'samples': [
None,
{'A': 1, 'B': 2, 'C': None},
{'A': 3, 'B': 4, 'C': 5},
{'A': 6, 'B': 7, 'C': None},
]
})
expected = pd.DataFrame({
'trial_num': [1, 2, 1, 2],
'subject': [1, 1, 2, 2],
'A': [None, 1, 3, 6],
'B': [None, 2, 4, 7],
'C': [None, None, 5, None]
})
assert_frame_equal(
extract_dictionary(df, 'samples', prefix='').reset_index(drop=True),
expected, check_like=True
)
def test_merge_columns(self):
dataframe = pd.DataFrame([
{
'test_1': pd.NaT,
'test_2': [],
'test_3': 'TEST',
'test_4': 'TEST2'
},
{
'test_1': 'TEST3',
'test_2': ['TEST'],
'test_3': 'TEST',
'test_4': 'TEST2'
},
{
'test_1': np.NaN,
'test_2': None,
'test_3': 'TEST5',
'test_4': 'TEST6'
}
])
expected_result_first = pd.DataFrame([
{
'test_1': None,
'test_2': [],
'test_3': 'TEST',
'test_4': 'TEST2',
'new_col_name': 'TEST'
},
{
'test_1': 'TEST3',
'test_2': ['TEST'],
'test_3': 'TEST',
'test_4': 'TEST2',
'new_col_name': 'TEST3'
},
{
'test_1': None,
'test_2': None,
'test_3': 'TEST5',
'test_4': 'TEST6',
'new_col_name': 'TEST5'
}
])
expected_result_last = pd.DataFrame([
{
'test_1': None,
'test_2': [],
'test_3': 'TEST',
'test_4': 'TEST2',
'new_col_name': 'TEST2'
},
{
'test_1': 'TEST3',
'test_2': ['TEST'],
'test_3': 'TEST',
'test_4': 'TEST2',
'new_col_name': 'TEST2'
},
{
'test_1': None,
'test_2': None,
'test_3': 'TEST5',
'test_4': 'TEST6',
'new_col_name': 'TEST6'
}
])
merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', keep='first')
assert_frame_equal(dataframe, expected_result_first, check_like=True)
merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', keep='last')
assert_frame_equal(dataframe, expected_result_last, check_like=True)
with self.assertRaises(ValueError):
merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', keep='something_wrong')
with self.assertRaises(ValueError):
merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', aggr=sum, keep='first')
def test_merge_columns_aggr(self):
dataframe = pd.DataFrame([
{
'test_1': 1,
'test_2': [],
'test_3': 5,
'test_4': 9
},
{
'test_1': 0,
'test_2': ['TEST'],
'test_3': 9,
'test_4': 7
},
{
'test_1': 1,
'test_2': None,
'test_3': 8,
'test_4': 1
}
])
expected_result = pd.DataFrame([
{
'test_1': 1,
'test_2': [],
'test_3': 5,
'test_4': 9,
'new_col_name': 15
},
{
'test_1': 0,
'test_2': ['TEST'],
'test_3': 9,
'test_4': 7,
'new_col_name': 16
},
{
'test_1': 1,
'test_2': None,
'test_3': 8,
'test_4': 1,
'new_col_name': 10
}
])
merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', aggr=sum)
| assert_frame_equal(dataframe, expected_result, check_like=True, check_dtype=False) | pandas.testing.assert_frame_equal |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_parsing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = | pd.Series([175., 100., 200.], dtype='float') | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 13:21:36 2019
@author: mt01034
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix
from KNNImplement import MyKNeighborsClassifier as myKNeighborsClassifier
from sklearn.model_selection import train_test_split
import atexit
import sys
result_table=pd.DataFrame(columns=['distance','weight','k','accuracy','featurenames'])
def sens_preci(mc):
m = []
for i in range(mc.shape[0]):
sensitivity=(mc[i,i]/float(sum(mc[i,:])+0.000000000000001))
precision=(mc[i,i]/float(sum(mc[:,i])+0.000000000000001))
m += [[sensitivity,precision]]
return m
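# sens_preci computes, for each class i of the confusion matrix, the pair
# [sensitivity, precision]: sensitivity = mc[i, i] / row-sum(i) (i.e. recall) and
# precision = mc[i, i] / column-sum(i); the tiny additive constant only guards
# against division by zero for empty rows or columns.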
def MSKNN(X,y,distance,weight,kMax=10,bfeatures=1,fNum=15):
if (bfeatures == 1):
MCKNNSelectFeatures(X,y,distance,weight,kMax,fNum)
else:
KNN(X,y,kMax)
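# MCKNNSelectFeatures performs greedy forward feature selection: for every k up to
# kMax it repeatedly adds the single remaining feature whose inclusion maximises
# leave-one-out cross-validated accuracy of MyKNeighborsClassifier, stops early once
# a perfect score of 1.0 is reached, and logs each step into result_table.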
def MCKNNSelectFeatures(X,y,dtype,w,kMax,fNum):
features= []
featurenames=[]
selectedscorelist = []
count=0
distance = ['manhattan','euclidean','minkowski','cosine','canberra','lorentzian','sorensen','hamming','JaccardModif']
weight = ['uniform','distance','distancesquare']
print ("dtype : "+str(distance[dtype])+"weight : "+str([w]))
for k in range(1,kMax):
knnclassifier = myKNeighborsClassifier(n_neighbors=k,weights=weight[w],distancetype =distance[dtype])
for j in range(fNum):
scoref=np.zeros(len(X.columns))
for i, feature in enumerate(X.columns):
cv = LeaveOneOut()
if i in features:
#print ("jump")
continue
nF = [feature] + list(X.columns[features])
scores = cross_val_score(knnclassifier, np.array(X[nF]), np.array(y), cv=cv) #scoring='f1_macro' cv=12
#print ("nF value :" + str(nF)+ "\n i="+str(i)+" \n score:"+str(scores))
scoref[i] = np.mean(scores)
ms = max(scoref)
if ms ==1:
selectedscorelist += [max(scoref)]
features += [np.argmax(scoref)]
featurenames.insert(j,X.columns[np.argmax(scoref)])
result_table.loc[count,['distance','weight','k','accuracy','featurenames']]=[dtype,w,k,max(scoref),featurenames]
break
selectedscorelist += [max(scoref)]
features += [np.argmax(scoref)]
featurenames.insert(j,X.columns[np.argmax(scoref)])
result_table.loc[count,['distance','weight','k','accuracy','featurenames']]=[dtype,w,k,max(scoref),featurenames]
count=count+1
print_full (result_table)
print_full (result_table)
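# KNN, by contrast, skips feature selection: it evaluates every combination of
# distance metric, weighting scheme and k on a single 75/25 train/test split,
# computing accuracy and per-class sensitivity/precision via sens_preci.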
def KNN(X,Y,kmax=10):
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25)
count=0
distance = ['manhattan','euclidean','minkowski','cosine','canberra','lorentzian','sorensen','hamming','JaccardModif']
weight = ['uniform','distance','distancesquare']
for i, dtype in enumerate(distance):
for j,w in enumerate(weight):
for k in range(1,kmax):
classifier = myKNeighborsClassifier(n_neighbors=k,weights=w,distancetype =dtype)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
accuracy = sum(y_pred==y_test)/float(len(y_test))
mc=confusion_matrix(y_test, y_pred)
sp=(np.array(sens_preci(mc)))
count=count+1
                #print(classification_report(y_test, y_pred))  # F1 = 2*(recall*precision)/(recall+precision); recall == sensitivity
print_full (result_table)
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
f= open("KNNResult.txt","w+")
f.write(str(x))
f.close()
pd.reset_option('display.max_rows')
def printonExit():
print_full (result_table)
f= open("KNNResult.txt","w+")
f.write("final")
f.write(str(result_table))
f.close()
#####################################################
pathogendata= | pd.read_csv("output_filename.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Run the MRIA Model for a given set of disruptions.
"""
import os
import numpy as np
import pandas as pd
from vtra.mria.disruption import create_disruption
from vtra.mria.model import MRIA_IO as MRIA
from vtra.mria.table import io_basic
from vtra.utils import load_config
def estimate_losses(input_file):
"""Estimate the economic losses for a given set of failure scenarios
Parameters
- input_file - String name of input file to failure scenarios
Outputs
- .csv file with total losses per failure scenario
"""
print('{} started!'.format(input_file))
    data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
        'paths']['calc'], load_config()['paths']['output']
# Set booleans
if 'min' in input_file:
min_rice = True
elif 'max' in input_file:
min_rice = False
if 'single' in input_file:
single_point = True
elif 'multiple' in input_file:
single_point = False
# Specify file path
if min_rice == True:
filepath = os.path.join(data_path, 'input_data', 'IO_VIETNAM_MIN.xlsx')
else:
filepath = os.path.join(data_path, 'input_data', 'IO_VIETNAM_MAX.xlsx')
# Create data input
DATA = io_basic('Vietnam', filepath, 2010)
DATA.prep_data()
# Run model and create some output
output = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 15:33:50 2019
@author: luc
"""
#%% Import Libraries
import numpy as np
import pandas as pd
import itertools
from stimuli_dictionary import cued_stim, free_stim, cued_stim_prac, free_stim_prac
def randomize(ID, Age, Gender, Handedness):
'''
Create a randomized and counterbalanced stimulus list for the current participant
Parameters
----------
ID : INT
The subject ID. Based on the subject ID the correct counterbalancing is determined
Returns
-------
    design : Pandas DataFrame
The dataframe containing the complete stimulus list (including practice trials)
keys: Dictionary
the response keys for the free phase
'''
#%% Variables
# experiment variables
nBlocks = 6
Phases = ['prac_cued', 'prac_free', 'cued', 'free']
nstim = 60 # sample 60 stim from each target_type
# sample from main stimulus set without replacement
# randomize word targets to avoid relationship reward - stimulus
for idx, name in enumerate(['lism','lila','nosm','nola']):
cued_stim[name] = np.random.choice(cued_stim[name], size = nstim, replace = False)
wide_cued = pd.DataFrame(cued_stim); wide_free = pd.DataFrame(free_stim)
wide_cued_prac = pd.DataFrame(cued_stim_prac); wide_free_prac = pd.DataFrame(free_stim_prac)
#%% Counterbalancing
# counterbalancing factors
cue_size = ['vowel','consonant'] # for cued trials only
reward_group = ['repeat','switch'] # in cued trials
rhand_parity = ['left','right'] # response hand: for free trials only
keys = ['cue_size', 'reward_group', 'rhand_parity'] # dictionary keys
# cartesian product of factors, then, index by taking modulus of subj ID by N combinations
counterbalancing = [dict(zip(keys,combination)) for combination in itertools.product(cue_size,reward_group,rhand_parity)]
counterbalancing = counterbalancing[ID%8]
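    # itertools.product yields 2 x 2 x 2 = 8 combinations, so ID % 8 rotates
    # consecutive participants through the full counterbalancing scheme.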
# dictionary to retrieve the mapping from the current counterbalancing later on
if counterbalancing['cue_size'] == 'vowel':
cues = {'size': ['A','E','I','O','U'], 'animacy': ['V','F','L','Q','C']}
else:
cues = {'size': ['V','F','L','Q','C'], 'animacy': ['A','E','I','O','U']}
if counterbalancing['rhand_parity'] == 'left':
keys = {'parity': {'left': 's', 'right': 'd'}, 'nsize': {'left': 'k', 'right': 'l'}}
else:
keys = {'parity': {'left': 'k', 'right': 'l'}, 'nsize': {'left': 's', 'right': 'd'}}
#%% Generate the main stimulus list
# (!!! half the current stimulus list controlling for word length and freq)
# main loop across all blocks, phases and trials
D = | pd.DataFrame() | pandas.DataFrame |
import os
import ubelt as ub
import numpy as np
import netharn as nh
import torch
import torchvision
import itertools as it
import utool as ut
import glob
from collections import OrderedDict
import parse
def _auto_argparse(func):
"""
Transform a function with a Google Style Docstring into an
`argparse.ArgumentParser`. Custom utility. Not sure where it goes yet.
"""
from xdoctest import docscrape_google as scrape
import argparse
import inspect
# Parse default values from the function dynamically
spec = inspect.getargspec(func)
kwdefaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
# Parse help and description information from a google-style docstring
docstr = func.__doc__
description = scrape.split_google_docblocks(docstr)[0][1][0].strip()
google_args = {argdict['name']: argdict
for argdict in scrape.parse_google_args(docstr)}
# Create the argument parser and register each argument
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
for arg in spec.args:
argkw = {}
if arg in kwdefaults:
argkw['default'] = kwdefaults[arg]
if arg in google_args:
garg = google_args[arg]
argkw['help'] = garg['desc']
try:
argkw['type'] = eval(garg['type'], {})
except Exception:
pass
parser.add_argument('--' + arg, **argkw)
return parser
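# Illustrative usage of _auto_argparse (an assumption, not part of the original
# script): build a parser from `fit`'s docstring and forward the parsed values.
#
#   parser = _auto_argparse(fit)
#   args = parser.parse_args()
#   fit(**vars(args))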
def fit(dbname='PZ_MTEST', nice='untitled', dim=416, bsize=6, bstep=4,
lr=0.001, decay=0.0005, workers=0, xpu='cpu', epoch='best', thres=0.5):
"""
Train a siamese chip descriptor for animal identification.
Args:
dbname (str): Name of IBEIS database to use
nice (str): Custom tag for this run
dim (int): Width and height of the network input
bsize (int): Base batch size. Number of examples in GPU at any time.
bstep (int): Multiply by bsize to simulate a larger batches.
lr (float): Base learning rate
decay (float): Weight decay (L2 regularization)
workers (int): Number of parallel data loader workers
xpu (str): Device to train on. Can be either `'cpu'`, `'gpu'`, a number
indicating a GPU (e.g. `0`), or a list of numbers (e.g. `[0,1,2]`)
indicating multiple GPUs
# epoch (int): epoch number to evaluate on
# thres (float): threshold for accuracy and mcc calculation
"""
# There has to be a good way to use argparse and specify params only once.
# Pass all args down to comparable_vamp
import inspect
kw = ub.dict_subset(locals(), inspect.getargspec(fit).args)
comparable_vamp(**kw)
def comparable_vamp(**kwargs):
import parse
import glob
from ibeis.algo.verif import vsone
parse.log.setLevel(30)
from netharn.examples.siam_ibeis import randomized_ibeis_dset
from netharn.examples.siam_ibeis import SiameseLP, SiamHarness, setup_harness
dbname = ub.argval('--db', default='GZ_Master1')
nice = ub.argval('--nice',default='untitled')
# thres = ub.argval('--thres',default=0.5)
dim = 512
datasets = randomized_ibeis_dset(dbname, dim=dim)
class_names = ['diff', 'same']
workdir = ub.ensuredir(os.path.expanduser(
'~/data/work/siam-ibeis2/' + dbname))
task_name = 'binary_match'
datasets['test'].pccs
datasets['train'].pccs
# pblm = vsone.OneVsOneProblem.from_empty('PZ_MTEST')
ibs = datasets['train'].infr.ibs
labeled_aid_pairs = [datasets['train'].get_aidpair(i)
for i in range(len(datasets['train']))]
pblm_train = vsone.OneVsOneProblem.from_labeled_aidpairs(
ibs, labeled_aid_pairs, class_names=class_names,
task_name=task_name,
)
test_labeled_aid_pairs = [datasets['test'].get_aidpair(i)
for i in range(len(datasets['test']))]
pblm_test = vsone.OneVsOneProblem.from_labeled_aidpairs(
ibs, test_labeled_aid_pairs, class_names=class_names,
task_name=task_name,
)
harn = setup_harness(dbname=dbname)
harn.initialize()
margin = harn.hyper.criterion_params['margin']
vamp_res = vamp(pblm_train, workdir, pblm_test)
# ----------------------------
# Evaluate the siamese dataset
pretrained = 'resnet50'
branch = getattr(torchvision.models, pretrained)(pretrained=False)
model = SiameseLP(p=2, branch=branch, input_shape=(1, 3, dim, dim))
#if torch.cuda.is_available():
xpu = nh.XPU.cast(kwargs.get('xpu','cpu'))#xpu_device.XPU.from_argv()
print('Preparing to predict {} on {}'.format(model.__class__.__name__,xpu))
xpu.move(model)
train_dpath ='/home/angelasu/work/siam-ibeis2/' + dbname + '/fit/nice/' + nice
print(train_dpath)
epoch = ub.argval('--epoch', default=None)
epoch = int(epoch) if epoch is not None and epoch != 'best' and epoch != 'recent' and epoch != 'all' else epoch
max_roc = 0
siam_res_arr = []
dist_arr_ret = []
if epoch == 'all':
# max_roc = 0
# siam_res_arr = []
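        # With --epoch all, every saved snapshot under the training directory is
        # evaluated in turn; siam() appears to track the best ROC seen so far via
        # max_roc and appends each run's result, so siam_res_arr[-1] below holds
        # the evaluation kept as the final siam_res.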
for file in sorted(glob.glob(train_dpath + '/*/_epoch_*.pt')):
print(file)
load_path = file
dist_arr, max_roc, siam_res_arr = siam(load_path, xpu, model, pblm_test, datasets, margin, max_roc, siam_res_arr, dist_arr_ret)
siam_res = siam_res_arr[-1]
else:
load_path = get_snapshot(train_dpath, epoch=epoch)
dist_arr, siam_res = siam(load_path, xpu, model, pblm_test, datasets, margin, max_roc, siam_res_arr, dist_arr_ret)
thres = ub.argval('--thres', default=0.5)
thres = float(thres)
thres_range = np.linspace(thres-0.05, thres+0.05,41)
for val in thres_range:
print('threshold value = {!r}'.format(val))
p_same = torch.sigmoid(torch.Tensor(-(dist_arr-margin))).numpy()-(val-0.5)
p_diff = 1 - p_same
# y_pred = (dist_arr <= 4)
import pandas as pd
| pd.set_option("display.max_rows", None) | pandas.set_option |
import pkg_resources
import pandas as pd
from unittest.mock import sentinel
import osmo_jupyter.dataset.parse as module
def test_parses_ysi_csv_correctly(tmpdir):
test_ysi_classic_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_ysi_classic.csv"
)
formatted_ysi_data = module.parse_ysi_proodo_file(test_ysi_classic_file_path)
expected_ysi_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"YSI barometric pressure (mmHg)": 750,
"YSI DO (% sat)": 19,
"YSI temperature (C)": 24.7,
"YSI unit ID": "unit ID",
}
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_ysi_data, expected_ysi_data)
def test_parses_ysi_kordss_correctly(tmpdir):
test_ysi_kordss_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_ysi_kordss.csv"
)
formatted_ysi_data = module.parse_ysi_prosolo_file(test_ysi_kordss_file_path)
expected_ysi_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"YSI barometric pressure (mmHg)": 750,
"YSI DO (% sat)": 60,
"YSI DO (mg/L)": 6,
"YSI temperature (C)": 24.7,
}
]
).set_index("timestamp")
pd.testing.assert_frame_equal(formatted_ysi_data, expected_ysi_data)
def test_parses_picolog_csv_correctly():
test_picolog_file_path = pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
formatted_picolog_data = module.parse_picolog_file(test_picolog_file_path)
expected_picolog_data = pd.DataFrame(
[
{
"timestamp": pd.to_datetime("2019-01-01 00:00:00"),
"PicoLog temperature (C)": 39,
"PicoLog barometric pressure (mmHg)": 750,
},
{
"timestamp": pd.to_datetime("2019-01-01 00:00:02"),
"PicoLog temperature (C)": 40,
"PicoLog barometric pressure (mmHg)": 750,
},
{
"timestamp": | pd.to_datetime("2019-01-01 00:00:04") | pandas.to_datetime |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal( | read_hdf(path, "df") | pandas.read_hdf |
__author__ = "<NAME>"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path = "../data/localData/"
nc = | pd.read_csv(path+"newCasesWithClass.csv") | pandas.read_csv |
import os
import tempfile
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import simulation as sim
from ...utils.testing import assert_frames_equal
def setup_function(func):
sim.clear_sim()
sim.enable_cache()
def teardown_function(func):
sim.clear_sim()
sim.enable_cache()
@pytest.fixture
def df():
return pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6]},
index=['x', 'y', 'z'])
def test_tables(df):
wrapped_df = sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
assert set(sim.list_tables()) == {'test_frame', 'test_func'}
table = sim.get_table('test_frame')
assert table is wrapped_df
assert table.columns == ['a', 'b']
assert table.local_columns == ['a', 'b']
assert len(table) == 3
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a)
pdt.assert_series_equal(table.a, df.a)
pdt.assert_series_equal(table['b'], df['b'])
table = sim._TABLES['test_func']
assert table.index is None
assert table.columns == []
    assert len(table) == 0
pdt.assert_frame_equal(table.to_frame(), df / 2)
pdt.assert_frame_equal(table.to_frame(columns=['a']), df[['a']] / 2)
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a / 2)
pdt.assert_series_equal(table.a, df.a / 2)
pdt.assert_series_equal(table['b'], df['b'] / 2)
assert len(table) == 3
assert table.columns == ['a', 'b']
def test_table_func_cache(df):
sim.add_injectable('x', 2)
@sim.table(cache=True)
def table(variable='x'):
return df * variable
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.get_table('table').clear_cached()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.clear_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_injectable('x', 5)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_table('table', table)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 5)
def test_table_func_cache_disabled(df):
sim.add_injectable('x', 2)
@sim.table('table', cache=True)
def asdf(x):
return df * x
sim.disable_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
def test_table_copy(df):
sim.add_table('test_frame_copied', df, copy_col=True)
sim.add_table('test_frame_uncopied', df, copy_col=False)
sim.add_table('test_func_copied', lambda: df, copy_col=True)
sim.add_table('test_func_uncopied', lambda: df, copy_col=False)
@sim.table(copy_col=True)
def test_funcd_copied():
return df
@sim.table(copy_col=False)
def test_funcd_uncopied():
return df
@sim.table(copy_col=True)
def test_funcd_copied2(test_frame_copied):
# local returns original, but it is copied by copy_col.
return test_frame_copied.local
@sim.table(copy_col=True)
def test_funcd_copied3(test_frame_uncopied):
# local returns original, but it is copied by copy_col.
return test_frame_uncopied.local
@sim.table(copy_col=False)
def test_funcd_uncopied2(test_frame_copied):
# local returns original.
return test_frame_copied.local
@sim.table(copy_col=False)
def test_funcd_uncopied3(test_frame_uncopied):
# local returns original.
return test_frame_uncopied.local
sim.add_table('test_cache_copied', lambda: df, cache=True, copy_col=True)
sim.add_table(
'test_cache_uncopied', lambda: df, cache=True, copy_col=False)
@sim.table(cache=True, copy_col=True)
def test_cached_copied():
return df
@sim.table(cache=True, copy_col=False)
def test_cached_uncopied():
return df
# Create tables with computed columns.
sim.add_table('test_copied_columns', pd.DataFrame(index=df.index),
copy_col=True)
sim.add_table('test_uncopied_columns', pd.DataFrame(index=df.index),
copy_col=False)
for column_name in ['a', 'b']:
label = "test_frame_uncopied.{}".format(column_name)
func = lambda col=label: col
for table_name in ['test_copied_columns', 'test_uncopied_columns']:
sim.add_column(table_name, column_name, func)
for name in ['test_frame_uncopied', 'test_func_uncopied',
'test_funcd_uncopied', 'test_funcd_uncopied2',
'test_funcd_uncopied3', 'test_cache_uncopied',
'test_cached_uncopied', 'test_uncopied_columns',
'test_frame_copied', 'test_func_copied',
'test_funcd_copied', 'test_funcd_copied2',
'test_funcd_copied3', 'test_cache_copied',
'test_cached_copied', 'test_copied_columns']:
table = sim.get_table(name)
table2 = sim.get_table(name)
# to_frame will always return a copy.
pdt.assert_frame_equal(table.to_frame(), df)
assert table.to_frame() is not df
pdt.assert_frame_equal(table.to_frame(), table.to_frame())
assert table.to_frame() is not table.to_frame()
pdt.assert_series_equal(table.to_frame()['a'], df['a'])
assert table.to_frame()['a'] is not df['a']
pdt.assert_series_equal(table.to_frame()['a'],
table.to_frame()['a'])
assert table.to_frame()['a'] is not table.to_frame()['a']
if 'uncopied' in name:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is table2['a']
else:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is not df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is not table2['a']
def test_columns_for_table():
sim.add_column(
'table1', 'col10', pd.Series([1, 2, 3], index=['a', 'b', 'c']))
sim.add_column(
'table2', 'col20', pd.Series([10, 11, 12], index=['x', 'y', 'z']))
@sim.column('table1')
def col11():
return pd.Series([4, 5, 6], index=['a', 'b', 'c'])
@sim.column('table2', 'col21')
def asdf():
return pd.Series([13, 14, 15], index=['x', 'y', 'z'])
t1_col_names = sim._list_columns_for_table('table1')
assert set(t1_col_names) == {'col10', 'col11'}
t2_col_names = sim._list_columns_for_table('table2')
assert set(t2_col_names) == {'col20', 'col21'}
t1_cols = sim._columns_for_table('table1')
assert 'col10' in t1_cols and 'col11' in t1_cols
t2_cols = sim._columns_for_table('table2')
assert 'col20' in t2_cols and 'col21' in t2_cols
def test_columns_and_tables(df):
sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
sim.add_column('test_frame', 'c', pd.Series([7, 8, 9], index=df.index))
@sim.column('test_func', 'd')
def asdf(test_func):
return test_func.to_frame(columns=['b'])['b'] * 2
@sim.column('test_func')
def e(column='test_func.d'):
return column + 1
test_frame = sim.get_table('test_frame')
assert set(test_frame.columns) == set(['a', 'b', 'c'])
assert_frames_equal(
test_frame.to_frame(),
pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_frame.to_frame(columns=['a', 'c']),
pd.DataFrame(
{'a': [1, 2, 3],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
test_func_df = sim._TABLES['test_func']
assert set(test_func_df.columns) == set(['d', 'e'])
assert_frames_equal(
test_func_df.to_frame(),
pd.DataFrame(
{'a': [0.5, 1, 1.5],
'b': [2, 2.5, 3],
'c': [3.5, 4, 4.5],
'd': [4., 5., 6.],
'e': [5., 6., 7.]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_func_df.to_frame(columns=['b', 'd']),
pd.DataFrame(
{'b': [2, 2.5, 3],
'd': [4., 5., 6.]},
index=['x', 'y', 'z']))
assert set(test_func_df.columns) == set(['a', 'b', 'c', 'd', 'e'])
assert set(sim.list_columns()) == {('test_frame', 'c'), ('test_func', 'd'),
('test_func', 'e')}
def test_column_cache(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(variable='x'):
return series * variable
c = lambda: sim._COLUMNS[key]
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 2)
c().clear_cached()
pdt.assert_series_equal(c()(), series * 3)
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
sim.clear_cache()
pdt.assert_series_equal(c()(), series * 4)
sim.add_injectable('x', 5)
pdt.assert_series_equal(c()(), series * 4)
sim.get_table('table').clear_cached()
pdt.assert_series_equal(c()(), series * 5)
sim.add_injectable('x', 6)
pdt.assert_series_equal(c()(), series * 5)
sim.add_column(*key, column=column, cache=True)
pdt.assert_series_equal(c()(), series * 6)
def test_column_cache_disabled(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(x):
return series * x
c = lambda: sim._COLUMNS[key]
sim.disable_cache()
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
def test_update_col(df):
wrapped = sim.add_table('table', df)
wrapped.update_col('b', pd.Series([7, 8, 9], index=df.index))
pdt.assert_series_equal(wrapped['b'], pd.Series([7, 8, 9], index=df.index))
wrapped.update_col_from_series('a', pd.Series([]))
pdt.assert_series_equal(wrapped['a'], df['a'])
wrapped.update_col_from_series('a', pd.Series([99], index=['y']))
pdt.assert_series_equal(
wrapped['a'], pd.Series([1, 99, 3], index=df.index))
def test_models(df):
sim.add_table('test_table', df)
df2 = df / 2
sim.add_table('test_table2', df2)
@sim.model()
def test_model(test_table, test_column='test_table2.b'):
tt = test_table.to_frame()
test_table['a'] = tt['a'] + tt['b']
pdt.assert_series_equal(test_column, df2['b'])
with pytest.raises(KeyError):
sim.get_model('asdf')
model = sim.get_model('test_model')
assert model._tables_used() == set(['test_table', 'test_table2'])
model()
table = sim.get_table('test_table')
pdt.assert_frame_equal(
table.to_frame(),
pd.DataFrame(
{'a': [5, 7, 9],
'b': [4, 5, 6]},
index=['x', 'y', 'z']))
assert sim.list_models() == ['test_model']
def test_model_run(df):
sim.add_table('test_table', df)
@sim.table()
def table_func(test_table):
tt = test_table.to_frame()
tt['c'] = [7, 8, 9]
return tt
@sim.column('table_func')
def new_col(test_table, table_func):
tt = test_table.to_frame()
tf = table_func.to_frame(columns=['c'])
return tt['a'] + tt['b'] + tf['c']
@sim.model()
def test_model1(year, test_table, table_func):
tf = table_func.to_frame(columns=['new_col'])
test_table[year] = tf['new_col'] + year
@sim.model('test_model2')
def asdf(table='test_table'):
tt = table.to_frame()
table['a'] = tt['a'] ** 2
sim.run(models=['test_model1', 'test_model2'], years=[2000, 3000])
test_table = sim.get_table('test_table')
assert_frames_equal(
test_table.to_frame(),
pd.DataFrame(
{'a': [1, 16, 81],
'b': [4, 5, 6],
2000: [2012, 2015, 2018],
3000: [3012, 3017, 3024]},
index=['x', 'y', 'z']))
m = sim.get_model('test_model1')
assert set(m._tables_used()) == {'test_table', 'table_func'}
def test_get_broadcasts():
sim.broadcast('a', 'b')
sim.broadcast('b', 'c')
sim.broadcast('z', 'b')
sim.broadcast('f', 'g')
with pytest.raises(ValueError):
sim._get_broadcasts(['a', 'b', 'g'])
assert set(sim._get_broadcasts(['a', 'b', 'c', 'z']).keys()) == \
{('a', 'b'), ('b', 'c'), ('z', 'b')}
assert set(sim._get_broadcasts(['a', 'b', 'z']).keys()) == \
{('a', 'b'), ('z', 'b')}
assert set(sim._get_broadcasts(['a', 'b', 'c']).keys()) == \
{('a', 'b'), ('b', 'c')}
assert set(sim.list_broadcasts()) == \
{('a', 'b'), ('b', 'c'), ('z', 'b'), ('f', 'g')}
def test_collect_variables(df):
sim.add_table('df', df)
@sim.table()
def df_func():
return df
@sim.column('df')
def zzz():
return df['a'] / 2
sim.add_injectable('answer', 42)
@sim.injectable()
def injected():
return 'injected'
@sim.table('source table', cache=True)
def source():
return df
with pytest.raises(KeyError):
sim._collect_variables(['asdf'])
with pytest.raises(KeyError):
sim._collect_variables(names=['df'], expressions=['asdf'])
names = ['df', 'df_func', 'answer', 'injected', 'source_label', 'df_a']
expressions = ['source table', 'df.a']
things = sim._collect_variables(names, expressions)
assert set(things.keys()) == set(names)
assert isinstance(things['source_label'], sim.DataFrameWrapper)
pdt.assert_frame_equal(things['source_label'].to_frame(), df)
assert isinstance(things['df_a'], pd.Series)
pdt.assert_series_equal(things['df_a'], df['a'])
def test_collect_variables_expression_only(df):
@sim.table()
def table():
return df
vars = sim._collect_variables(['a'], ['table.a'])
pdt.assert_series_equal(vars['a'], df.a)
def test_injectables():
sim.add_injectable('answer', 42)
@sim.injectable()
def func1(answer):
return answer * 2
@sim.injectable('func2', autocall=False)
def asdf(variable='x'):
return variable / 2
@sim.injectable()
def func3(func2):
return func2(4)
@sim.injectable()
def func4(func='func1'):
return func / 2
assert sim._INJECTABLES['answer'] == 42
assert sim._INJECTABLES['func1']() == 42 * 2
assert sim._INJECTABLES['func2'](4) == 2
assert sim._INJECTABLES['func3']() == 2
assert sim._INJECTABLES['func4']() == 42
assert sim.get_injectable('answer') == 42
assert sim.get_injectable('func1') == 42 * 2
assert sim.get_injectable('func2')(4) == 2
assert sim.get_injectable('func3') == 2
assert sim.get_injectable('func4') == 42
with pytest.raises(KeyError):
sim.get_injectable('asdf')
assert set(sim.list_injectables()) == \
{'answer', 'func1', 'func2', 'func3', 'func4'}
def test_injectables_combined(df):
@sim.injectable()
def column():
return pd.Series(['a', 'b', 'c'], index=df.index)
@sim.table()
def table():
return df
@sim.model()
def model(table, column):
df = table.to_frame()
df['new'] = column
sim.add_table('table', df)
sim.run(models=['model'])
table_wr = sim.get_table('table').to_frame()
pdt.assert_frame_equal(table_wr[['a', 'b']], df)
pdt.assert_series_equal(table_wr['new'], column())
def test_injectables_cache():
x = 2
@sim.injectable(autocall=True, cache=True)
def inj():
return x * x
i = lambda: sim._INJECTABLES['inj']
assert i()() == 4
x = 3
assert i()() == 4
i().clear_cached()
assert i()() == 9
x = 4
assert i()() == 9
sim.clear_cache()
assert i()() == 16
x = 5
assert i()() == 16
sim.add_injectable('inj', inj, autocall=True, cache=True)
assert i()() == 25
def test_injectables_cache_disabled():
x = 2
@sim.injectable(autocall=True, cache=True)
def inj():
return x * x
i = lambda: sim._INJECTABLES['inj']
sim.disable_cache()
assert i()() == 4
x = 3
assert i()() == 9
sim.enable_cache()
assert i()() == 9
x = 4
assert i()() == 9
sim.disable_cache()
assert i()() == 16
def test_clear_cache_all(df):
@sim.table(cache=True)
def table():
return df
@sim.column('table', cache=True)
def z(table):
return df.a
@sim.injectable(cache=True)
def x():
return 'x'
sim.eval_variable('table.z')
sim.eval_variable('x')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE.keys() == [('table', 'z')]
assert sim._INJECTABLE_CACHE.keys() == ['x']
sim.clear_cache()
assert sim._TABLE_CACHE == {}
assert sim._COLUMN_CACHE == {}
assert sim._INJECTABLE_CACHE == {}
def test_clear_cache_scopes(df):
@sim.table(cache=True, cache_scope='forever')
def table():
return df
@sim.column('table', cache=True, cache_scope='iteration')
def z(table):
return df.a
@sim.injectable(cache=True, cache_scope='step')
def x():
return 'x'
sim.eval_variable('table.z')
sim.eval_variable('x')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE.keys() == [('table', 'z')]
assert sim._INJECTABLE_CACHE.keys() == ['x']
sim.clear_cache(scope='step')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE.keys() == [('table', 'z')]
assert sim._INJECTABLE_CACHE == {}
sim.clear_cache(scope='iteration')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE == {}
assert sim._INJECTABLE_CACHE == {}
sim.clear_cache(scope='forever')
assert sim._TABLE_CACHE == {}
assert sim._COLUMN_CACHE == {}
assert sim._INJECTABLE_CACHE == {}
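# cache_scope semantics: 'forever' survives both step- and iteration-level clearing,
# 'iteration' is recomputed only when a new year starts, and 'step' is recomputed
# between models within the same year (test_clear_cache_scopes above clears by
# scope; test_cache_scope below checks the recomputation timing).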
def test_cache_scope(df):
sim.add_injectable('x', 11)
sim.add_injectable('y', 22)
sim.add_injectable('z', 33)
sim.add_injectable('iterations', 1)
@sim.injectable(cache=True, cache_scope='forever')
def a(x):
return x
@sim.injectable(cache=True, cache_scope='iteration')
def b(y):
return y
@sim.injectable(cache=True, cache_scope='step')
def c(z):
return z
@sim.model()
def m1(year, a, b, c):
sim.add_injectable('x', year + a)
sim.add_injectable('y', year + b)
sim.add_injectable('z', year + c)
assert a == 11
@sim.model()
def m2(year, a, b, c, iterations):
assert a == 11
if year == 1000:
assert b == 22
assert c == 1033
elif year == 2000:
assert b == 1022
assert c == 3033
sim.add_injectable('iterations', iterations + 1)
sim.run(['m1', 'm2'], years=[1000, 2000])
def test_table_func_local_cols(df):
@sim.table()
def table():
return df
sim.add_column('table', 'new', pd.Series(['a', 'b', 'c'], index=df.index))
assert sim.get_table('table').local_columns == ['a', 'b']
def test_is_table(df):
sim.add_table('table', df)
assert sim._is_table('table') is True
assert sim._is_table('asdf') is False
@pytest.fixture
def store_name(request):
fname = tempfile.NamedTemporaryFile(suffix='.h5').name
def fin():
if os.path.isfile(fname):
os.remove(fname)
request.addfinalizer(fin)
return fname
def test_write_tables(df, store_name):
sim.add_table('table', df)
@sim.model()
def model(table):
pass
sim.write_tables(store_name, ['model'], None)
with pd.get_store(store_name, mode='r') as store:
assert 'table' in store
| pdt.assert_frame_equal(store['table'], df) | pandas.util.testing.assert_frame_equal |
import nltk.data
from gensim.models import word2vec
from gensim.models.word2vec import LineSentence
from sklearn.cluster import KMeans
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
import os
import re
import logging
import sqlite3
import time
import sys
import multiprocessing
from wordcloud import WordCloud, ImageColorGenerator
import matplotlib.pyplot as plt
from itertools import cycle
class Clustering:
def __init__(self, noOfClusters, noOfComments, noOfTopWords ):
self.tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
self.cmaps = cycle([
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'
])
self.ENGLISH_STOP_WORDS = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves",
"you", "your", "yours", "yourself", "yourselves", "he", "him",
"his", "himself", "she", "her", "hers", "herself", "it", "its",
"itself", "they", "them", "their", "theirs", "themselves", "what",
"which", "who", "whom", "this", "that", "these", "those", "am", "is",
"are", "was", "were", "be", "been", "being", "have", "has", "had",
"having", "do", "does", "did", "doing", "a", "an", "the", "and", "but",
"if", "or", "because", "as", "until", "while", "of", "at", "by", "for",
"with", "about", "against", "between", "into", "through", "during", "before",
"after", "above", "below", "to", "from", "up", "down", "in", "out", "on",
"off", "over", "under", "again", "further", "then", "once", "here", "there",
"when", "where", "why", "how", "all", "any", "both", "each", "few", "more",
"most", "other", "some", "such", "no", "nor", "not", "only", "own", "same",
"so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "should",
"now"
]
self.EXPLICIT_WORDS = ["anal", "anus", "ballsack", "blowjob", "butt", "blow", "job", "boner", "clitoris",
"cock", "cunt", "dick", "dildo", "dyke", "fag", "fuck", "fuckin", "jizz", "labia", "muff",
"nigger", "nigga", "penis", "piss", "pussy", "scrotum", "sex", "shit", "slut",
"smegma", "spunk", "twat", "vagina", "wank", "whore"
]
self.noOfClusters = noOfClusters
self.noOfComments = noOfComments
self.noOfTopWords = noOfTopWords + 10
def clean_text(self, all_comments, out_name):
out_file = open(out_name, 'w')
for pos in range(len(all_comments)):
#get the comment
val = all_comments.iloc[pos]['body']
#normalize tabs and remove newlines
no_tabs = str(val).replace('\t', ' ').replace('\n', '')
#remove all characters except A-Z and a dot.
alphas_only = re.sub(r"[^a-zA-Z.]", " ", no_tabs)
#normalize spaces to 1
multi_spaces = re.sub(" +", " ", alphas_only)
#strip trailing and leading spaces
no_spaces = multi_spaces.strip()
#normalize all charachters to lowercase
clean_text = no_spaces.lower()
#get sentences from the tokenizer, remove the dot in each.
sentences = self.tokenizer.tokenize(clean_text)
sentences = [re.sub(r"[.]", "", sentence) for sentence in sentences]
#if the text has more than one space (removing single word comments) and one character, write it to the file.
if len(clean_text) > 0 and clean_text.count(' ') > 0:
for sentence in sentences:
out_file.write("%s\n" % sentence)
#print(sentence)
out_file.close()
def clustering_on_wordvecs(self, word_vectors, num_clusters):
#initialize a k-means object and use it to extract centroids
kmeans_clustering = KMeans(n_clusters = num_clusters, init='k-means++')
idx = kmeans_clustering.fit_predict(word_vectors)
return kmeans_clustering.cluster_centers_, idx
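# design note: 'k-means++' seeding spreads the initial centroids over the vector
# space, making the result less sensitive to an unlucky random start; idx holds
# the cluster label of every word vector in input order.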
def get_top_words(self, index2word, k, centers, wordvecs):
tree = KDTree(wordvecs)
#query the tree for the k points closest to each cluster center
closest_points = [tree.query(np.reshape(x, (1, -1)), k=k) for x in centers]
closest_words_idxs = [x[1] for x in closest_points]
#look up the word at each returned index and collect the words per cluster in a dictionary
closest_words = {}
for i in range(0, len(closest_words_idxs)):
closest_words['Cluster #' + str(i+1).zfill(2)] = [index2word[j] for j in closest_words_idxs[i][0]]
#DataFrame is generated from the dictionary.
df = pd.DataFrame(closest_words)
df.index = df.index+1
return df
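# the frame has one 'Cluster #NN' column per centroid and k rows of nearest
# words, e.g. (assumed call) get_top_words(model.wv.index2word, 20, centers, Z).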
def display_cloud(self, cluster_num, cmap):
wc = WordCloud(background_color="gray", max_words=2000, max_font_size=80, colormap=cmap)
#wordcloud = wc.generate(' '.join([word for word in top_words['Cluster #' + str(cluster_num).zfill(2)] if word not in ENGLISH_STOP_WORDS]))
try:
words = [word for word in self.top_words['Cluster #' + str(cluster_num).zfill(2)] if word not in self.ENGLISH_STOP_WORDS and len(word) > 2]
wordcloud = wc.generate(' '.join([word for word in words if not any(explicitWord in word for explicitWord in self.EXPLICIT_WORDS)])) #if any(xs in s for xs in matchers)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.savefig(os.path.join('static', 'images', 'img_' + str(cluster_num)), bbox_inches='tight')
except Exception:
pass
def print_word_table(self, table, key):
return pd.DataFrame(table, columns=[key, 'similarity'])
def main_func(self):
#------------read and load data carefully from sqlite db --------------------
sql_con = sqlite3.connect("T:\\2018++\\BE\LP\\LP2\\extras\\database.sqlite\\database.sqlite")
cursor = sql_con.cursor()
print("Connected to database")
cursor.execute("SELECT body FROM May2015")
count = 0
col_names = ['body']
sql_data = pd.DataFrame(columns = col_names)
#print(sql_data)
#print(len(sql_data))
start = time.time()
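# note: growing the frame row by row with .loc is quadratic; for large comment
# counts it is faster to collect the rows in a list and build the DataFrame once.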
for row in cursor:
if count == self.noOfComments:
break
temp_dic = {'body':row[0]}
sql_data.loc[len(sql_data)] = temp_dic
count+=1
print(len(sql_data))
print('Total time: ' + str((time.time() - start)) + ' secs')
start = time.time()
self.clean_text(sql_data, 'out_full')
print('Total time: ' + str((time.time() - start)) + ' secs')
#--------- training and saving model ------------------------
start = time.time()
#dimensionality of the hidden layer representation
num_features = 100
#minimum word count to keep a word in the vocabulary
min_word_count = 40
#number of threads to run in parallel
#set to total number of cpus.
num_workers = multiprocessing.cpu_count()
#context window size (on each side)
context = 5
#downsample setting for frequent words
downsampling = 1e-3
#initialize and train the model.
print("Training model...")
model = word2vec.Word2Vec(LineSentence('out_full'), workers=num_workers, size=num_features, min_count = min_word_count, window = context, sample = downsampling)
model.init_sims(replace=True)
#save the model
model_name = "model_full_reddit"
model.save(model_name)
print('Total time: ' + str((time.time() - start)) + ' secs')
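# gensim 3.x attribute names are used below (wv.syn0, wv.index2word); in
# gensim >= 4 they correspond to wv.vectors and wv.index_to_key.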
Z = model.wv.syn0
print(Z[0].shape)
#print(Z[0])
#---------cluster the word vectors obtained-------------
start = time.time()
centers, clusters = self.clustering_on_wordvecs(Z, self.noOfClusters)
print('Total time: ' + str((time.time() - start)) + ' secs')
start = time.time()
centroid_map = dict(zip(model.wv.index2word, clusters))
print('Total time: ' + str((time.time() - start)) + ' secs')
#------just for display------
pd.set_option('display.max_columns', None)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from os import path, getcwd
from ..graphs import GraphGroupScatter
from ..data import Vector
from ..analysis.exc import NoDataError
from ..data import UnequalVectorLengthError
class MyTestCase(unittest.TestCase):
@property
def save_path(self):
if getcwd().split('/')[-1] == 'test':
return './images/'
elif getcwd().split('/')[-1] == 'sci_analysis':
if path.exists('./setup.py'):
return './sci_analysis/test/images/'
else:
return './test/images/'
else:
return './'
def test_1_scatter_two_groups_default(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
save_to='{}test_group_scatter_1'.format(self.save_path)))
def test_2_scatter_two_groups_no_fit(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], fit=False,
save_to='{}test_group_scatter_2'.format(self.save_path)))
def test_3_scatter_two_groups_no_points(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], points=False,
save_to='{}test_group_scatter_3'.format(self.save_path)))
def test_4_scatter_two_groups_highlight_one(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2],
save_to='{}test_group_scatter_4'.format(self.save_path)))
def test_5_scatter_three_groups_highlight_two(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = [1] * 100 + [2] * 100 + [3] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2, 3],
save_to='{}test_group_scatter_5'.format(self.save_path)))
def test_6_scatter_two_groups_highlight_one_no_points(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2],
points=False, save_to='{}test_group_scatter_6'.format(self.save_path)))
def test_7_scatter_two_groups_highlight_one_no_fit(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=[2],
fit=False, save_to='{}test_group_scatter_7'.format(self.save_path)))
def test_8_scatter_two_groups_highlight_one_scalar_num(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = [1] * 100 + [2] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=2,
save_to='{}test_group_scatter_8'.format(self.save_path)))
def test_9_scatter_two_groups_string_names_highlight_one(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight=['b'],
save_to='{}test_group_scatter_9'.format(self.save_path)))
def test_10_scatter_three_groups_string_names_highlight_scalar_string(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], highlight='bc',
save_to='{}test_group_scatter_10'.format(self.save_path)))
def test_11_scatter_three_groups_invalid_highlight_groups(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
highlight=['z', 'y', 'x'],
save_to='{}test_group_scatter_11'.format(self.save_path)))
def test_12_scatter_two_groups_no_boxplot_borders(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
boxplot_borders=False,
save_to='{}test_group_scatter_12'.format(self.save_path)))
def test_13_scatter_two_groups_title(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'],
title='Title Test', save_to='{}test_group_scatter_13'.format(self.save_path)))
def test_14_scatter_two_groups_labels(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
grp = ['a'] * 100 + ['b'] * 100
cs_x = np.concatenate((input_1_x, input_2_x))
cs_y = np.concatenate((input_1_y, input_2_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertTrue(GraphGroupScatter(input_array['a'], input_array['b'], groups=input_array['c'], xname='Test x',
yname='Test y', save_to='{}test_group_scatter_14'.format(self.save_path)))
def test_15_scatter_three_groups_auto_named(self):
np.random.seed(987654321)
input_1_x = st.norm.rvs(size=100)
input_1_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_1_x]
input_2_x = st.norm.rvs(size=100)
input_2_y = [(x / 2) + st.norm.rvs(0, 0.2, size=1)[0] for x in input_2_x]
input_3_x = st.norm.rvs(size=100)
input_3_y = np.array([(x * 1.5) + st.norm.rvs(size=100)[0] for x in input_3_x]) - 0.5
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100
cs_x = np.concatenate((input_1_x, input_2_x, input_3_x))
cs_y = np.concatenate((input_1_y, input_2_y, input_3_y))
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
# coding: utf-8
from __future__ import unicode_literals, print_function
import matplotlib
import matplotlib.dates
import matplotlib.patches as mpatch
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import random
from . import core
from .. import metrics
available_series = ['bonded_slowdown', 'waiting_time', 'all']
def annotate(ax, rect, annot):
rx, ry = rect.get_xy()
cx = rx + rect.get_width() / 2.0
cy = ry + rect.get_height() / 2.0
ax.annotate(annot, (cx, cy), color='black',
fontsize='small', ha='center', va='center')
def map_unique_numbers(df):
"""Map the DataFrame of jobs to a set of jobs which should be labeled and a list of unique ids
for the given DataFrame.
Jobs which have the same jobID and workload_name will be merged together and the same
unique_id will be assigned to them. The set of labeled_jobs will only contain the job
in the middle of each list of jobs sharing the same id.
"""
labeled_jobs = set()
unique_numbers = []
# Jobs start their number with 1
number_counter = 1
numbers_map = {}
jobs_for_unique_number = {}
for index, row in df.iterrows():
workload_name = str(row["workload_name"])
job_id = str(row["jobID"])
full_job_id = workload_name + "!" + job_id
job_intervals = row['allocated_resources']
try:
# The job id was already in the workload: re-use the same unique id.
unique_number = numbers_map[full_job_id]
list_of_jobs = jobs_for_unique_number[full_job_id]
except KeyError:
# The job id is new: generate a new unique number for this
# workload_name!jobID combination.
unique_number = number_counter
numbers_map[full_job_id] = number_counter
number_counter += 1
jobs_for_unique_number[full_job_id] = list_of_jobs = []
if job_intervals:
list_of_jobs.append((index, row))
unique_numbers.append(unique_number)
for k, v in jobs_for_unique_number.items():
# If there are jobs for this job id which have job intervals:
# search for the element in the middle and add its index to the set.
if v:
index, row = v[len(v)//2]
labeled_jobs.add(index)
return labeled_jobs, unique_numbers
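# e.g. (hypothetical ids) three rows sharing workload 'w0' and jobID '7' all get
# unique_number 1, and only the middle row's index lands in labeled_jobs, so a
# job split over several rows is annotated exactly once.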
def plot_gantt(jobset, ax=None, title="Gantt chart",
labels=True, palette=None, alpha=0.4,
time_scale=False,
color_function=None,
label_function=None):
# Palette generation if needed
if palette is None:
palette = core.generate_palette(8)
assert(len(palette) > 0)
if color_function is None:
def color_randrobin_select(job, palette):
return palette[job.unique_number % len(palette)]
color_function = color_randrobin_select
if label_function is None:
def job_id_label(job):
return job['jobID']
label_function = job_id_label
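# both callbacks can be overridden by the caller, e.g. (hypothetical)
# color_function=lambda job, palette: palette[0] to paint every job alike, or a
# label_function that reads any other column of jobset.df.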
# Get current axe to plot
if ax is None:
ax = plt.gca()
df = jobset.df.copy()
labeled_jobs, unique_numbers = map_unique_numbers(df)
df["unique_number"] = unique_numbers
if time_scale:
df['submission_time'] = pd.to_datetime(df['submission_time'], unit='s')
"""
**pyPheWAS Core version 2 (main pyPheWAS code)**
Contains all functions that drive the core PheWAS & ProWAS analysis tools.
"""
from collections import Counter
import getopt
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import statsmodels.discrete.discrete_model as sm
import statsmodels.formula.api as smf
import matplotlib.lines as mlines
from tqdm import tqdm
import sys
import matplotlib
def print_start_msg():
path = os.path.dirname(os.path.abspath(__file__))
filename = os.sep.join([path, 'resources', 'pyPheWAS_start_msg.txt'])
with open(filename, 'r') as msg_f:
print('\n')
for line in msg_f:
print(line.strip('\n'))
return
def get_codes(filename):
"""
Load PheWAS/ProWAS code map from the resources directory.
:param filename: name of file in the resources folder to load
:type filename: str
:returns: code map from the given file
:rtype: pandas DataFrame
"""
path = os.path.dirname(os.path.abspath(__file__))
filename = os.sep.join([path, 'resources', filename])
if 'icd9' in filename:
data_col = 'ICD9'
new_data_col = 'ICD_CODE'
phecode_col = 'PheCode'
elif 'icd10' in filename:
data_col = 'ICD10'
new_data_col = 'ICD_CODE'
phecode_col = 'PheCode'
else:
data_col = 'cpt'
new_data_col = 'CPT_CODE'
phecode_col = 'prowas_code'
try:
phecode_map = pd.read_csv(filename, dtype={data_col:str, phecode_col:str})
phecode_map.rename(columns={data_col:new_data_col},inplace=True)
phecode_map.dropna(subset=[phecode_col], inplace=True)
phecode_map.drop_duplicates(subset=[new_data_col,phecode_col], inplace=True)
except Exception as e:
print(e.args[0])
print('Error loading phecode map: exiting pyPheWAS')
sys.exit()
return phecode_map
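# e.g. (hypothetical filename) get_codes('phecode_map_icd9.csv') yields a frame
# with ICD_CODE and PheCode columns; the 'icd9' / 'icd10' / 'cpt' substring in the
# filename decides which raw columns are expected and how they are renamed.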
def get_group_file(path, filename):
"""
Read group data from the given file.
Note: Any records with a null **id** are dropped.
:param path: path to the file that contains the group data
:param filename: name of the file that contains the group data.
:type path: pathlib Path
:type filename: string
:returns: The data from the group file.
:rtype: pandas DataFrame
"""
wholefname = path / filename
genotypes = pd.read_csv(wholefname)
genotypes = genotypes.dropna(subset=['id'])
return genotypes
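# e.g. (hypothetical paths) get_group_file(Path('/data'), 'group.csv'); rows with
# a null 'id' are dropped before the frame is returned.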
def get_icd_codes(path, filename, reg_type):
"""
Read ICD data from the given file and load it into a pandas DataFrame.
ICD records are mapped to their correpsonding PheWAS Codes.
The maximum age of each subject at each PheWAS Code is calculated and
added to the DataFrame as the column *MaxAgeAtICD*. If ``reg_type`` = 2, the
interval of time (years) over which a subject experiences each PheWAS Code
is added as the column *duration*.
:param path: path to the file that contains the phenotype data
:param filename: name of the file that contains the phenotype data.
:param reg_type: type of regression (0:binary, 1:count, 2:duration)
:type path: pathlib Path
:type filename: str
:type reg_type: int
:returns: data from the phenotype file.
:rtype: pandas DataFrame
"""
wholefname = path / filename
icdfile = pd.read_csv(wholefname, dtype={'ICD_CODE': str})
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
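    # the expected hour_sin/hour_cos values correspond to sin(2*pi*h/23) and cos(2*pi*h/23) for hour h == 5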
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
| Timestamp('2008-10-23 05:53:06') | pandas.Timestamp |
# -*- coding: utf-8 -*-
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
import pandas as pd
def return_alike_axis(X,Y):
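    """Restrict X and Y to the index labels they share, keeping X's index order."""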
idx = [x for x in X.index if x in Y.index]
X = X.loc[idx]
Y = Y.loc[idx]
return (X,Y)
def get_data(series, steps, forward = False):
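    """Return a DataFrame whose columns '<name>_t_01' ... '<name>_t_<steps>' hold the
    series shifted by 1..steps periods (lagged values by default, future values when
    forward=True); rows made incomplete by the shifting are dropped.

    Illustrative use (assumes an hourly pd.Series named 'load', not part of this module):
        get_data(load, 3)  ->  columns load_t_01, load_t_02, load_t_03
    """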
if forward:
fb = -1
else:
fb = 1
columns = [series.name + '_t_'+ '{0:0>2}'.format(str(i)) for i in range(1,steps+1)]
data = [list(series.shift(i*fb).values) for i in range(1,steps+1)]
df = pd.DataFrame(index = series.index, data = {k:v for k,v in zip(columns, data)}).dropna()
return df
def get_data_df(df, steps, forward = False):
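    """Apply get_data to every column of df and concatenate the shifted columns along axis 1."""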
df_list = []
for column in df.columns:
d = get_data(df[column], steps, forward = forward)
df_list.append(d)
df = pd.concat(df_list, axis = 1)
return df
def preprocess(data, y=False, freqstr='H', steps = 24, limit = 5, forward = False):
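    """Validate the shape of y inputs, resample/interpolate the data, and expand it into
    `steps` shifted feature columns via get_data_df."""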
if y:
if isinstance(data, pd.DataFrame):
if data.shape[1]>1:
raise ValueError('y must be of shape (n,1)')
else:
data = pd.DataFrame(data).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
data = get_data_df(data, steps, forward = forward)
return data
class KnnEnsemble:
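    """Average the forecasts of several KNeighborsRegressor models (one per value in
    `n_neighbors`), with helpers for lag-feature construction and forward/backward lag
    selection.

    Illustrative use (hourly series `x` and `y` are assumed, not part of this module):
        model = KnnEnsemble(n_neighbors=[3, 5, 7])
        model.fit(x, y, freqstr='H', h=24, lags=15)
        forecasts = model.static(x_new)
    """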
def __init__(self, n_neighbors=[3,5,7], weights='uniform', algorithm='auto',
leaf_size=30, p=2, metric='minkowski', metric_params=None,
n_jobs=1, **kwargs):
        self.n_neighbors = n_neighbors
self.weights = weights
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.metric = metric
self.metric_params = metric_params
self.n_jobs = n_jobs
self.kwargs = kwargs
self.model_dict = {}
self.n = 0
self.params = 0
self.X = None
self.y = None
self.high = None
self.low = None
self.lags = None
self.x_shape = None
self.freqstr = None
self.limit = None
self.x_test = None
self.h = None
        for n in self.n_neighbors:
model = KNeighborsRegressor(n_neighbors = n, weights=self.weights, algorithm=self.algorithm,
leaf_size=self.leaf_size, p=self.p, metric=self.metric, metric_params=self.metric_params,
n_jobs=self.n_jobs, **self.kwargs)
self.model_dict.update({n:{'model':model}})
def fit(self, x, y, freqstr='H', h=24, lags=15, limit = 5, new_fit = True):
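        """Fit each KNN model; when `lags` is truthy, x and y are first resampled and
        expanded into lag (x) and lead (y) feature matrices."""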
if lags:
self.freqstr = freqstr
self.lags = lags
self.limit = limit
self.h = h
x = pd.DataFrame(x).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
self.shape = x.shape
if isinstance(y, pd.DataFrame):
if y.shape[1]>1:
raise ValueError('y must be of shape (n,1)')
else:
y = pd.DataFrame(y).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
X = get_data_df(x, lags, forward = False)
Y = get_data_df(y, h, forward = True)
X,Y = return_alike_axis(X,Y)
else:
X=x
Y=y
if new_fit:
self.X = X
self.y = Y
        for n in self.n_neighbors:
self.model_dict[n]['model'] = self.model_dict[n]['model'].fit(X,Y)
def static(self, X, test = False, reshape = True):
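        """Predict with every fitted model and return the element-wise mean of the
        forecasts; with reshape=True, X is first resampled and expanded into lag features."""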
if reshape:
X = pd.DataFrame(X)
X = X.asfreq(freq = self.freqstr).interpolate(limit = self.limit).dropna()
X = get_data_df(X, self.lags, forward = False).dropna()
if test:
self.x_test = X
pred_list = []
        for n in self.n_neighbors:
try:
self.model_dict[n]['predict'] = self.model_dict[n]['model'].predict(X)
except KeyError:
self.model_dict[n].update({'predict':self.model_dict[n]['model'].predict(X)})
pred_list.append(self.model_dict[n]['predict'])
preds = np.mean(pred_list, axis = 0)
return preds
def predict(self, X, freqstr = 'H', h = 24):
try:
preds = self.static(X, reshape = False)
        except Exception:
preds = self.static(X, reshape = True)
s_list = []
idx = X.index
for index,pred in enumerate(preds):
ts = idx[index]
            date = pd.date_range(ts, periods=len(pred), freq=freqstr).shift(1)  # forecast index starts one period after ts
s = pd.Series(data = pred, index = date)
s_list.append(s)
return s_list
def error(self, X_test, y_test, dynamic = False):
y_test = pd.DataFrame(y_test).asfreq(freq = self.freqstr).interpolate(limit = self.limit).dropna()
y_test = get_data_df(y_test, self.h, forward = True)
X_test,y_test = return_alike_axis(X_test,y_test)
if dynamic:
y_hat = self.dynamic(X_test)
else:
y_hat = self.static(X_test)
rmse = np.sqrt((np.subtract(y_test,y_hat)**2).mean())
#aic = (np.log(rmse/self.n) + 2 * (self.params +1)).mean()
return rmse
def forward_selection(self, x_train, y_train, x_test, y_test, freqstr='H', h = 24, max_lags = 15, start_time = None, interpolate = True, limit = 5, brk_at_min=False):
if interpolate:
X_test = pd.DataFrame(x_test).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
Y_test = pd.DataFrame(y_test).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
X_train = pd.DataFrame(x_train).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
Y_train = pd.DataFrame(y_train).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
else:
X_test = pd.DataFrame(x_test).asfreq(freq = freqstr).dropna()
Y_test = pd.DataFrame(y_test).asfreq(freq = freqstr).dropna()
X_train = pd.DataFrame(x_train).asfreq(freq = freqstr).dropna()
Y_train = pd.DataFrame(y_train).asfreq(freq = freqstr).dropna()
Y_train = get_data_df(Y_train, h, forward = True).dropna()
X_train = get_data_df(X_train, max_lags, forward = False).dropna()
X_train,Y_train = return_alike_axis(X_train,Y_train)
Y_test = get_data_df(Y_test, h, forward = True).dropna()
X_test = get_data_df(X_test, max_lags, forward = False).dropna()
X_test,Y_test = return_alike_axis(X_test,Y_test)
if start_time:
Y_train = Y_train.between_time(start_time,start_time)
X_train = X_train.between_time(start_time,start_time)
Y_test = Y_test.between_time(start_time,start_time)
X_test = X_test.between_time(start_time,start_time)
errors = {}
min_rmse = float('inf')
for lag in range(1,max_lags+1):
x_train = X_train.iloc[:,0:lag]
x_test = X_test.iloc[:,0:lag]
self.fit(x_train, Y_train, lags=False)
y_hat = self.static(x_test, test = False, reshape = False)
rmse = np.sqrt((np.subtract(Y_test,y_hat)**2).mean())
errors.update({'lag_'+str(lag):rmse})
if brk_at_min:
if rmse.mean()<min_rmse:
min_rmse = rmse.mean()
else:
break
return pd.DataFrame(data = errors)
def backward_selection(self, x_train, y_train, x_test, y_test, freqstr='H', h = 24, lags = 15, start_time = None, interpolate = True, limit = 5, brk_at_min=False):
"""
Given an x_train, y_train and x_test, y_test the backward selection removes the beginning lags and records the error
returns the error dataframe
"""
if interpolate:
X_test = pd.DataFrame(x_test).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
Y_test = pd.DataFrame(y_test).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
X_train = pd.DataFrame(x_train).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
Y_train = pd.DataFrame(y_train).asfreq(freq = freqstr).interpolate(limit = limit).dropna()
else:
X_test = pd.DataFrame(x_test).asfreq(freq = freqstr).dropna()
Y_test = pd.DataFrame(y_test).asfreq(freq = freqstr).dropna()
X_train = pd.DataFrame(x_train).asfreq(freq = freqstr).dropna()
Y_train = pd.DataFrame(y_train).asfreq(freq = freqstr).dropna()
Y_train = get_data_df(Y_train, h, forward = True).dropna()
X_train = get_data_df(X_train, lags, forward = False).dropna()
X_train,Y_train = return_alike_axis(X_train,Y_train)
Y_test = get_data_df(Y_test, h, forward = True).dropna()
X_test = get_data_df(X_test, lags, forward = False).dropna()
X_test,Y_test = return_alike_axis(X_test,Y_test)
if start_time:
Y_train = Y_train.between_time(start_time,start_time)
X_train = X_train.between_time(start_time,start_time)
Y_test = Y_test.between_time(start_time,start_time)
X_test = X_test.between_time(start_time,start_time)
errors = {}
for lag in range(0,lags):
self.fit(X_train.iloc[:,lag:],Y_train, lags = False)
y_hat = self.static(X_test.iloc[:,lag:], test = False, reshape = False)
rmse = np.sqrt((np.subtract(Y_test,y_hat)**2).mean())
errors.update({'lag_'+str(lag+1):rmse})
errors_df = pd.DataFrame(errors)
return errors_df
def forward_backward_selection(self, x_train, y_train, x_test, y_test, freqstr='H', h = 24, max_lags = 15, start_time = None, interpolate = True, limit = 5, brk_at_min=True):
forward_errors = self.forward_selection(x_train, y_train, x_test,
y_test, freqstr=freqstr, h = h,
max_lags = max_lags,
start_time=start_time,
interpolate = interpolate,
limit = limit,
brk_at_min=brk_at_min)
lags = int(forward_errors.mean().idxmin().split('_')[-1])
backward_errors = self.backward_selection(x_train, y_train,
x_test, y_test,
freqstr=freqstr, h = h,
lags = lags,
start_time=start_time,
interpolate = interpolate,
limit = limit,
brk_at_min=brk_at_min)
min_lag = int(backward_errors.mean().idxmin().split('_')[-1])
x_train = preprocess(x_train, steps=lags, limit = limit)
y_train = preprocess(y_train, steps=h, limit = limit)
x_train, y_train = return_alike_axis(x_train, y_train)
x_train = x_train.iloc[:,min_lag-1:]
y_train = y_train.iloc[:,min_lag-1:]
self.fit(x_train,y_train, lags = False)
return backward_errors
def __rtrn_fwd_lags(self, endogenous, exogenous=None, offset='Y', freqstr='H', h = 24, max_lags = 15, start_time=None, interpolate = True, limit = 5, brk_at_min=False):
if isinstance(exogenous, pd.DataFrame) or isinstance(exogenous, pd.Series):
endogenous,exogenous = return_alike_axis(endogenous,exogenous)
end_grpd = endogenous.groupby(pd.Grouper(freq = offset))
error_dict = {}
i=0
        # Determine error in each offset for variable selection (forward selection process)
for g,v in end_grpd:
x_test = v.dropna()
x_train = endogenous.copy().drop(index = x_test.index)
y_test = v.dropna()
y_train = x_train.copy()
if isinstance(exogenous, pd.DataFrame) or isinstance(exogenous, pd.Series):
x_test = pd.concat([x_test, exogenous.loc[x_test.index]], axis = 1).dropna()
x_train = pd.concat([x_train, exogenous.loc[x_train.index]], axis = 1).dropna()
x_train, y_train = return_alike_axis(x_train,y_train)
x_test, y_test = return_alike_axis(x_test,y_test)
errors = self.forward_selection(x_train, y_train, x_test, y_test,
freqstr=freqstr, h = h,
max_lags = max_lags,
start_time=start_time,
interpolate = interpolate, limit = limit,
brk_at_min=False)
error_dict.update({'offset_'+str(i):errors.mean()})
i+=1
df = | pd.DataFrame(error_dict) | pandas.DataFrame |
import torch
import pandas as pd
from fast_radiology.metrics import dice as dice3D
from artificial_contrast.const import (
DICE_NAME,
PATH_NAME,
PATIENT_NAME,
PREDICTIONS_NAME,
TARGETS_NAME,
)
def evaluate_patients(learn, patients, img_size):
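    """Compute a per-patient 3D Dice score by stacking the learner's per-slice
    predictions and targets from the validation set."""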
results = []
preds, targets = learn.get_preds()
preds_df = pd.DataFrame(
{
PREDICTIONS_NAME: [
preds[i].argmax(0).view(1, img_size, img_size).int().numpy()
for i in range(len(preds))
],
TARGETS_NAME: [
targets[i].view(1, img_size, img_size).int().numpy()
for i in range(len(targets))
],
PATH_NAME: learn.data.valid_ds.items,
}
)
for patient in patients:
pred_3d = torch.tensor(
preds_df[preds_df[PATH_NAME].str.contains(patient)]
.sort_values(PATH_NAME)[PREDICTIONS_NAME]
.to_list()
)
pred_3d = pred_3d.view(-1, img_size, img_size)
target_3d = torch.tensor(
preds_df[preds_df[PATH_NAME].str.contains(patient)]
.sort_values(PATH_NAME)[TARGETS_NAME]
.to_list()
)
target_3d = target_3d.view(-1, img_size, img_size)
patient_dice = dice3D(pred_3d, target_3d)
results.append({PATIENT_NAME: patient, DICE_NAME: patient_dice.item()})
return | pd.DataFrame(results) | pandas.DataFrame |
import ast
import datetime
import time
import math
import pypandoc
import os
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import pandas as pd
import statsmodels.api as sm
from library.api import API_HOST, fetch_objects, fetch_objects_by_id, get_token
from library.settings import MIN_VIDEO_LENGTH
def get_unix_date(date):
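    """Convert an ISO-8601 timestamp string (e.g. '2018-01-01T00:00:00Z') to a unix
    timestamp; returns None for empty input."""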
if date:
timestamp = time.mktime(datetime.datetime.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%SZ").timetuple())
return int(timestamp)
return None
def html2latex(text):
output = pypandoc.convert(text, 'latex', format='html', extra_args=['-f', 'html+tex_math_dollars'])
return output
def process_step_url(row):
if ('max_step_variation' not in row.index) or (row.max_step_variation == 1):
# no step variations
return '{}/lesson/{}/step/{}'.format(API_HOST, row.lesson_id, row.step_position)
return '{}/lesson/{}/step/{}?alternative={}'.format(API_HOST,
row.lesson_id, row.step_position, row.step_variation)
# API functions
def get_course_structure(course_id, cached=True, token=None):
# use cache
course_structure_filename = 'cache/course-{}-structure.csv'.format(course_id)
if os.path.isfile(course_structure_filename) and cached:
course_structure = pd.read_csv(course_structure_filename)
return course_structure
if not token:
token = get_token()
course = fetch_objects_by_id('courses', course_id, token=token)[0]
sections = fetch_objects('sections', token=token, id=course['sections'])
unit_ids = [unit for section in sections for unit in section['units']]
units = fetch_objects('units', token=token, id=unit_ids)
lesson_ids = [unit['lesson'] for unit in units]
lessons = fetch_objects('lessons', token=token, id=lesson_ids)
step_ids = [step for lesson in lessons for step in lesson['steps']]
steps = fetch_objects('steps', token=token, id=step_ids)
step_id = [step['id'] for step in steps]
step_position = [step['position'] for step in steps]
step_type = [step['block']['name'] for step in steps]
step_lesson = [step['lesson'] for step in steps]
step_correct_ratio = [step['correct_ratio'] for step in steps]
course_structure = pd.DataFrame({'course_id': course_id,
'lesson_id': step_lesson,
'step_id': step_id,
'step_position': step_position,
'step_type': step_type,
'step_correct_ratio': step_correct_ratio})
module_position = [[section['position']]*len(section['units']) for section in sections]
module_position = [value for small_list in module_position for value in small_list]
module_id = [[section['id']]*len(section['units']) for section in sections]
module_id = [value for small_list in module_id for value in small_list]
module_hard_deadline = [[section['hard_deadline']]*len(section['units']) for section in sections]
module_hard_deadline = [value for small_list in module_hard_deadline for value in small_list]
module_begin_date = [[section['begin_date']]*len(section['units']) for section in sections]
module_begin_date = [value for small_list in module_begin_date for value in small_list]
lesson_position = [unit['position'] for unit in units]
module_structure = pd.DataFrame({'lesson_id': lesson_ids,
'lesson_position': lesson_position,
'module_id': module_id,
'module_position': module_position,
'hard_deadline': module_hard_deadline,
'begin_date': module_begin_date})
course_structure = course_structure.merge(module_structure)
course_structure = course_structure.sort_values(['module_position', 'lesson_position', 'step_position'])
course_structure.to_csv(course_structure_filename, index=False)
return course_structure
def get_course_submissions(course_id, course_structure=pd.DataFrame(), cached=True, token=None):
header = ['submission_id', 'step_id', 'user_id', 'attempt_time', 'submission_time', 'status']
# use cache
course_submissions_filename = 'cache/course-{}-submissions.csv'.format(course_id)
if os.path.isfile(course_submissions_filename) and cached:
course_submissions = pd.read_csv(course_submissions_filename)
course_submissions = course_submissions[header]
return course_submissions
if not token:
token = get_token()
if course_structure.empty:
        course_structure = get_course_structure(course_id, token=token)
course_submissions = pd.DataFrame()
for step in course_structure.step_id.unique().tolist():
step_submissions = pd.DataFrame(fetch_objects('submissions', token=token, step=step))
if step_submissions.empty:
continue
step_submissions = step_submissions.rename(columns={'id': 'submission_id',
'time': 'submission_time',
'attempt': 'attempt_id'})
attempt_ids = step_submissions['attempt_id'].unique().tolist()
step_attempts = pd.DataFrame(fetch_objects_by_id('attempts', attempt_ids, token=token))
step_attempts = step_attempts.rename(columns={'id': 'attempt_id',
'time': 'attempt_time',
'status': 'attempt_status'})
step_submissions = pd.merge(step_submissions, step_attempts, on='attempt_id')
step_submissions['step_id'] = step
course_submissions = course_submissions.append(step_submissions)
if course_submissions.empty:
return pd.DataFrame(columns=header)
course_submissions['submission_time'] = course_submissions['submission_time'].apply(get_unix_date)
course_submissions['attempt_time'] = course_submissions['attempt_time'].apply(get_unix_date)
course_submissions = course_submissions.rename(columns={'user': 'user_id'})
course_submissions = course_submissions[header]
course_submissions.to_csv(course_submissions_filename, index=False)
return course_submissions
def get_course_grades(course_id, cached=True, token=None):
header = ['user_id', 'step_id', 'is_passed', 'score', 'total_score', 'date_joined', 'last_viewed']
# use cache
course_grades_filename = 'cache/course-{}-grades.csv'.format(course_id)
if os.path.isfile(course_grades_filename) and cached:
course_grades = pd.read_csv(course_grades_filename)
course_grades = course_grades[header]
return course_grades
if not token:
token = get_token()
course_grades = pd.DataFrame()
grades = fetch_objects('course-grades', course=course_id, token=token)
for grade in grades:
user_grade = pd.DataFrame(grade['results']).transpose()
user_grade['user_id'] = grade['user']
user_grade['total_score'] = grade['score']
user_grade['date_joined'] = grade['date_joined']
user_grade['last_viewed'] = grade['last_viewed']
course_grades = course_grades.append(user_grade)
course_grades['date_joined'] = course_grades['date_joined'].apply(get_unix_date)
course_grades['last_viewed'] = course_grades['last_viewed'].apply(get_unix_date)
course_grades = course_grades.reset_index(drop=True)
course_grades = course_grades[header]
course_grades.to_csv(course_grades_filename, index=False)
return course_grades
def get_enrolled_users(course_id, token=None):
if not token:
token = get_token()
learner_group = fetch_objects('courses', token=token, pk=course_id)[0]['learners_group']
users = fetch_objects('groups', token=token, pk=learner_group)[0]['users']
return users
def process_options_with_name(data, reply, option_names):
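    """Parse the raw option data and user reply, look up each option's id and
    correctness in `option_names`, and return them aligned with the reply choices."""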
data = ast.literal_eval(data)
reply = ast.literal_eval(reply)['choices']
is_multiple = data['is_multiple_choice']
options = data['options']
option_id = []
clue = []
for op in options:
if op in option_names.option_name.tolist():
val = option_names.loc[option_names.option_name == op, 'option_id'].values[0]
clue_val = option_names.loc[option_names.option_name == op, 'is_correct'].values[0]
else:
val = np.nan
clue_val = np.nan
option_id += [val]
clue += [clue_val]
answer = [(c == r) for c, r in zip(clue, reply)]
options = pd.DataFrame({'is_multiple': is_multiple,
'option_id': option_id,
'answer': answer,
'clue': clue})
options = options[['is_multiple', 'option_id', 'answer', 'clue']]
return options
def get_question(step_id):
source = fetch_objects('step-sources', id=step_id)
try:
question = source[0]['block']['text']
    except Exception:
question = '\n'
question = html2latex(question)
return question
def get_step_options(step_id):
source = fetch_objects('step-sources', id=step_id)
try:
options = source[0]['block']['source']['options']
options = pd.DataFrame(options)
is_multiple = source[0]['block']['source']['is_multiple_choice']
except KeyError:
options = pd.DataFrame(columns=['step_id', 'option_id', 'option_name', 'is_correct', 'is_multiple'])
return options
options['step_id'] = step_id
options['is_multiple'] = is_multiple
options = options.sort_values('text').reset_index()
options = options.rename(columns={'text': 'option_name'})
options['option_id'] = options.index + 1
options = options[['step_id', 'option_id', 'option_name', 'is_correct', 'is_multiple']]
return options
def get_step_info(step_id):
info = pd.Series(fetch_objects('steps', pk=step_id)[0])
    info = info.rename({'id': 'step_id'})  # rename the Series index label 'id'
return info
# IRT functions
def create_answer_matrix(data, user_column, item_column, value_column, aggfunc=np.mean, time_column=None):
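    """Pivot long-format response data into a user x item matrix; if time_column is
    given, only each user's earliest response per item is kept."""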
if time_column:
# select only the first response
data = data.loc[data.groupby([item_column, user_column])[time_column].idxmin()]
data = data.drop_duplicates(subset=[item_column, user_column])
answers = pd.pivot_table(data, values=[value_column], index=[user_column], columns=[item_column],
aggfunc=aggfunc)
if not answers.empty:
answers = answers[value_column]
return answers
# TODO: add Cronbach's alpha to item statistics
# see http://stackoverflow.com/questions/20799403/improving-performance-of-cronbach-alpha-code-python-numpy
def get_item_statistics(answers, discrimination_prop=0.3):
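    """Classical test-theory item statistics: difficulty (1 - mean score), discrimination
    (top vs. bottom scorers), and item-total correlation."""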
total_people = answers.shape[0]
n_people = answers.count(axis=0)
# use mean (not sum) because of NA values
item_difficulty = 1 - answers.mean(axis=0)
total_score = answers.mean(axis=1)
item_total_corr = answers.corrwith(total_score)
n_top_people = int(discrimination_prop * total_people)
low_performers = total_score.sort_values().index[:n_top_people]
top_performers = total_score.sort_values().index[-n_top_people:]
item_discrimination = answers.loc[top_performers].mean(axis=0) - answers.loc[low_performers].mean(axis=0)
stats = pd.DataFrame({'item': item_difficulty.index,
'n_people': n_people,
'difficulty': item_difficulty,
'item_total_corr': item_total_corr,
'discrimination': item_discrimination})
stats.reset_index(drop=True, inplace=True)
stats = stats[['item', 'n_people', 'difficulty', 'discrimination', 'item_total_corr']]
return stats
# Video report
def get_video_stats(step_id, cached=True, token=None):
if not token:
token = get_token()
cached_name = 'cache/step-{}-videostats.csv'.format(step_id)
if cached and os.path.isfile(cached_name):
stats = pd.read_csv(cached_name)
return stats
stats = pd.DataFrame(fetch_objects('video-stats', token=token, step=step_id))
if not stats.empty:
stats.to_csv(cached_name, index=False)
stats = pd.read_csv(cached_name)
return stats
def get_video_peaks(stats, plot=False, ax=None, ax2=None):
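    """Detect peaks in the rewatching and play-event curves, keep rewatch peaks that
    overlap a play peak, and return their normalized width, height and area."""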
header = ['start', 'peak', 'end', 'rise_rate', 'is_common',
'width', 'height', 'area']
if stats.empty:
return pd.DataFrame(columns=header)
row = stats.loc[stats.index[0]]
try:
watched_first = np.array(ast.literal_eval(row['watched_first']))
watched_total = np.array(ast.literal_eval(row['watched_total']))
play = np.array(ast.literal_eval(row['play']))
except ValueError:
return pd.DataFrame(columns=header)
# use only shortest data for analyses
video_length = min(len(watched_first), len(watched_total), len(play))
if video_length < MIN_VIDEO_LENGTH:
return pd.DataFrame(columns=header)
watched_first = watched_first[:video_length]
watched_total = watched_total[:video_length]
play = play[:video_length]
play[0] = play[1] # ignore auto-play in the beginning
rewatching = watched_total - watched_first
# To fight the noise, use smoothing technique before analysis
rewatching = get_smoothing_data(rewatching, frac=0.05)
play = get_smoothing_data(play, frac=0.1)
rewatch_windows = detect_peaks(rewatching)
play_windows = detect_peaks(play)
rewatch_windows['is_common'] = False
play_windows['is_common'] = False
# find common windows
for ind, row in rewatch_windows.iterrows():
start = row['start']
end = row['end']
if play_windows.loc[~((play_windows.end < start) | (end < play_windows.start))].shape[0] > 0:
rewatch_windows.loc[ind, 'is_common'] = True
common_windows = rewatch_windows[rewatch_windows.is_common].copy()
if plot:
peak_plot(rewatching, rewatch_windows, ax)
if ax:
ax.set_ylabel('Num rewatchers', fontsize=10)
peak_plot(play, play_windows, ax2)
if ax2:
ax2.set_xlabel('Time in video (seconds)', fontsize=10)
ax2.set_ylabel('Num play events', fontsize=10)
# calculate peak features (normalized width, height, and area)
total_length = len(rewatching)
total_height = max(rewatching)
total_area = sum(rewatching)
if not common_windows.empty:
common_windows['width'] = common_windows.apply(lambda x: (x['end']-x['start'])/total_length, axis=1)
common_windows['height'] = common_windows.apply(lambda x: rewatching[x['peak']]/total_height, axis=1)
common_windows['area'] = common_windows.apply(
lambda x: rewatching[x['start']:x['end']].sum()/total_area, axis=1)
else:
common_windows = pd.DataFrame(columns=header)
return common_windows
def get_smoothing_data(data, frac=0.05):
"""
Return smoothing data based on LOWESS (Locally Weighted Scatterplot Smoothing)
:param data: 1-D numpy array of values
:param frac: Between 0 and 1. The fraction of the data used when estimating each value
:return: 1-D numpy array of smoothing values
"""
smoothing_data = sm.nonparametric.lowess(data, np.arange(len(data)), frac=frac, return_sorted=False)
return smoothing_data
# TwitInfo Peak Detection Algorithm
# Paper:
# http://hci.stanford.edu/publications/2011/twitinfo/twitinfo-chi2011.pdf
# Code source:
# https://github.com/pmitros/LectureScapeBlock/blob/ac16ec00dc018e5b17a8c23a025f98e693695527/lecturescape/lecturescape/algorithms.py
# updated for peaks finding
def detect_peaks(data, tau=1.5):
"""
peak detection algorithm
"""
def detect_peaks_update(old_mean, old_mean_dev, update_value):
ALPHA = 0.125
diff = math.fabs(old_mean - update_value)
new_mean_dev = ALPHA * diff + (1-ALPHA) * old_mean_dev
new_mean = ALPHA * update_value + (1-ALPHA) * old_mean
return [new_mean, new_mean_dev]
bins = data
P = 5
TAU = tau
# list of peaks - their start, end, and peak time
windows = | pd.DataFrame(columns=['start', 'peak', 'end', 'rise_rate']) | pandas.DataFrame |
##Exec Dashboard Project
# PACKAGES and MODULES----------------------------------------------------------
import os
import operator
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
# FUNCTIONS----------------------------------------------------------------------
def to_usd(my_price):
'''
Converts a numeric value to usd-formatted string, for printing and display purposes.
Param: my_price (int or float) like 4000.444444
Example: to_usd(4000.444444)
Returns: $4,000.44
'''
return f'${my_price:,.2f}' # > $12,000.71
def month_num(month):
'''
Converts a full month name to numeric string.
Param: month (string) like February
Example: month_num('February')
Returns: '02'
'''
if month == 'January':
return '01'
elif month == 'February':
return '02'
elif month == 'March':
return '03'
elif month == 'April':
return '04'
elif month == 'May':
return '05'
elif month == 'June':
return '06'
elif month == 'July':
return '07'
elif month == 'August':
return '08'
elif month == 'September':
return '09'
elif month == 'October':
return '10'
elif month == 'November':
return '11'
else:
return '12'
def rev_month_num(mnum):
'''
Converts a numeric month string to full month name.
Param: month number (string) like 02
    Example: rev_month_num('02')
Returns: 'February'
'''
if mnum == '01':
return 'January'
elif mnum == '02':
return 'February'
elif mnum == '03':
return 'March'
elif mnum == '04':
return 'April'
elif mnum == '05':
return 'May'
elif mnum == '06':
return 'June'
elif mnum == '07':
return 'July'
elif mnum == '08':
return 'August'
elif mnum == '09':
return 'September'
elif mnum == '10':
return 'October'
elif mnum == '11':
return 'November'
else:
return 'December'
def validate(user_input, ref_list):
'''
Validates user inputs
'''
store=0
for item in ref_list:
if item==user_input:
store+=1
if store>0:
return "match"
elif user_input=="Exit":
return "exit"
else:
return "no match"
def prev_year(user_month, user_year, min_date):
'''
    Defines the period of up to 12 months prior to the month of user input.
'''
if user_month=="01":
m_end="12"
y_end=str(int(user_year)-1)
else:
y_end=user_year
if int(user_month)<=10:
m_end="0"+str(int(user_month)-1)
else:
m_end=str(int(user_month)-1)
m_st=user_month
y_st=str(int(user_year)-1)
comp_str=y_st+m_st
comp_int=int(comp_str)
repl_int=max(int(min_date),comp_int)
repl_str=str(repl_int)
if repl_str!=comp_str:
        y_st=repl_str[0:4]  # YYYY portion of the clamped YYYYMM string
        m_st=repl_str[-2:]  # MM portion
return [m_st,y_st,m_end,y_end]
# DATA---------------------------------------------------------------------------
# Create list of files
data_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data')
data_files = os.listdir(data_filepath)
#print(data_files)
#initialize dataframe
master_data = pd.DataFrame()
# import all data
for dfile in data_files:
temp = pd.read_csv(os.path.join(data_filepath, dfile))
master_data=temp.append(master_data,ignore_index=True) # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
master_data['year']=master_data['date'].str.split('-').str[0]
master_data['month'] = master_data['date'].str.split('-').str[1]
master_data['yearmon']=master_data['year']+master_data['month']
master_data['yearmon num']= | pd.to_numeric(master_data['yearmon']) | pandas.to_numeric |
import os
import pathlib
import spacy
import re
import pandas as pd
import matplotlib.pyplot as plt
from gensim.models.phrases import Phrases, Phraser
ROOT_DIR = pathlib.Path(__file__).parent.parent
# Set directories and create them if necessary
plain_text_dir = pathlib.Path().joinpath(ROOT_DIR,"data","plaintext")
structured_data_dir = pathlib.Path().joinpath(ROOT_DIR,"data","structured")
logging_dir = pathlib.Path().joinpath(ROOT_DIR,"logs")
nlp = spacy.load("es_core_news_sm", disable=['parser', 'ner'])
nlp.Defaults.stop_words |= {"y","a","o"}
nlp.add_pipe(nlp.create_pipe('sentencizer'))
files = os.listdir(plain_text_dir)
paths = [ plain_text_dir.joinpath(file) for file in files]
# Get list of all sentences
sentences = list()
for path in paths:
# Open text file
with open(path,"r",encoding="UTF-8") as f:
text = f.read()
# Load as spacy object
doc = nlp(text)
for sent in doc.sents:
# convert to lemma and remove punctuation, spaces, stop words and proper nouns
sent = [t.lemma_.lower() for t in sent if not (t.is_punct or t.is_space or t.is_stop or t.pos_ == "PROPN")]
        sent = [re.sub(r"^\W+|\W+$", "", lemma) for lemma in sent]
for old,new in [("casar","casa"),("callar","calle"),("finar","fin"),("manir","mano")]:
sent = [ re.sub(old,new,lemma) for lemma in sent ]
sentences.append(sent)
# get all unigrams
ugrams = [ item for sublist in sentences for item in sublist]
ugrams_freq = pd.Series(ugrams).value_counts()
path = structured_data_dir.joinpath("unigrams_freq.tsv")
ugrams_freq.to_csv(path,sep="\t")
# get all bigrams
phrases = Phrases(sentences, min_count=1, threshold=1)
w_bigrams = [phrases[x] for x in sentences]
bigrams = [ item for sublist in w_bigrams for item in sublist if item.count("_") == 1]
bigrams_freq = pd.Series(bigrams).value_counts()
path = structured_data_dir.joinpath("bigrams_freq.tsv")
bigrams_freq.to_csv(path,sep="\t")
# get all trigrams
trigram_model = Phrases(w_bigrams, min_count=1, threshold=1)
w_trigrams = [trigram_model[x] for x in w_bigrams]
trigrams = [ item for sublist in w_trigrams for item in sublist if item.count("_") == 2]
trigrams_freq = | pd.Series(trigrams) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 5 12:51:54 2016
@author: tkc
"""
import os, re, glob
import pandas as pd
import numpy as np
from math import factorial # used by Savgol matrix
from io import StringIO
import datetime
from scipy import optimize
def rangefromstring(x):
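    """Expand a range string such as '0-115,300-400' into a flat list of ints."""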
result = []
for part in x.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return result
def unpackfitregs(df):
''' Loaded data frame has ev range, list of background regions and fit type
unpack from dataframe into list of each
    1) fitrange is total ev range (i.e. 0-100), 2) backgroundregions are index #s (or energy in eV) of regions commonly without peaks
3) fittype (mostly parabola) and 4) threshold for derivative knockout
add overlap range between adjacent fits'''
Fitregions=[]
overlapregs=[] # list of lists containing adjacent overlapping index ranges
# TODO test to ensure that normal eV range corresponds to range of indices
for i in range(0,len(df)):
tempstr=df.iloc[i]['Backgroundregs']
indexrange=rangefromstring(tempstr) # converts string describing range to actual range
Fitregions.append([df.iloc[i]['Fitrange'],indexrange,df.iloc[i]['Fittype'], df.iloc[i]['Threshold']])
return Fitregions
def makesavgol(df):
'''Perform python smooth-diff used to guide selection of background regions for SEM-EDX spectra
'''
df['Savgol']=0.0 # add/initialize col for 2nd deriv Sav-gol
thisdim=len(df)
thisreg=df['Counts'] # convert to Series (keep these index)
myarr=np.asarray(thisreg) # convert to numpy array
window_size=11
deriv=2
order=2 # order of savgol fit
rate=1
order_range = range(order+1) # range object
half_window = (window_size -1) // 2 # type int
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
# b is matrix 3 by window size
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv) # series as long as array
# linalg.pinv gets pseudo-inverse of a matrix (window-sized series)
# .A of any matrix returns it as ndarray object
# Pad the signal at the extremes with values taken from the signal itself
firstvals = myarr[0] - np.abs(myarr[1:half_window+1][::-1] - myarr[0] )
lastvals = myarr[-1] + np.abs(myarr[-half_window-1:-1][::-1] - myarr[-1])
myarr= np.concatenate((firstvals, myarr, lastvals))
# Now convolve input signal and sav-gol processing 1D array .. thisreg is numpy array w/ savgol results
myarr=np.convolve( myarr, m[::-1], mode='valid')
thisreg=pd.Series(myarr) # convert array to series
thisreg.loc[0:thisdim]=myarr # copies numpy array but keeps same indices
df['Savgol']=thisreg # copy deriv column to dataframe
return df # returns savitsky-golay smooth diff over same full region
def openSEM(SEMfileName):
'''Open csv as dataframe if it exists or if not strip header from psmsa/emsa and import as dataframe '''
csvname=str(SEMfileName.split('.')[0])+'.csv'
try:
SEMfile=pd.read_csv(csvname, encoding='cp437')
except: # if csv doesn't exist, just open/strip psmsa
with open(SEMfileName, 'r') as file:
filedata = file.read()
filedata =filedata.split('#SPECTRUM :')[1]
filedata =filedata.split('#ENDOFDATA : ')[0]
thisdata=StringIO(filedata)
SEMfile=pd.read_csv(thisdata)
try:
SEMfile=SEMfile.drop(SEMfile.columns[[2]], axis=1) # drop erroneous 3rd column if present
except:
print('') # ignore error if 3rd column not present
SEMfile.columns=['Energy','Counts']
return SEMfile # should return data as pandas dataframe
def fitparabola(df, SEMfileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform polynomial/parabola fit
return chunk with backfit column added '''
xcol=df['Energy']
ycol=df['Counts'] # Counts1, Counts2 or whatever
    # second-order polynomial (parabola) fit of counts vs. energy
try:
A,B,C=np.polyfit(xcol, ycol, 2)
except: # deal with common problems with linregress
print('Fitting error from ', "{0:.2f}".format(df.Energy.min()),'to ',"{0:.2f}".format(df.Energy.max()), ' in file ', SEMfileName)
fitparams=('n/a','n/a','n/a') # return all n/a
return df, fitparams
fitparams=(A, B, C) # tuple to return coeffs of 2nd order poly fit
for index,row in df.iterrows(): # write this fit into this chunk of data (redundant?)
xval=df.loc[index]['Energy']
yval= A * xval**2+ B * xval + C
df=df.set_value(index, 'Backfit', yval)
return df, fitparams
def findfitregion(df, fitregion, threshold):
'''Passing single list of allowable index #s for background fits (no duplicates)
remove those with high from list of allowable indices any that show high smoothed-derivatives (i.e. not good for background fitting '''
Backfitdf=df.ix[[x for x in fitregion]] # filter out those not in allowable background ranges
# these are loaded from SEM_backfit_regions.csv
Backfitdf=Backfitdf.dropna(subset=['Counts']) # drops above (set to na by ix)
# now additionally filter out those with derivative above threshold value
Backfitdf=Backfitdf[(Backfitdf['Savgol']<threshold) & (Backfitdf['Savgol']>-threshold)]
return Backfitdf
def findelemregions(Elements, SEMquantparams):
''' Takes element string and returns standard Elemdata for each elem symbol containing params
needed for peak finding and quant
tuple for integ peak is symbol, ideal peak index #, and integ kfactor
don't apply energy shifts here... apply later when doing integrate'''
Elemdata=[]
try:
for i, elem in enumerate(Elements):
# find row in AESquantparams for this element
thiselemdata=SEMquantparams[(SEMquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
# integ peak position value is relative to negpeak in smooth-diff (i.e. -5 is 5 eV below ideal negpeak)
idealindex=int((thiselemdata.energy+.01)*100) # ideal index value of SEM-EDX peak from energy in keV
kfact=thiselemdata.kfactor # typical sensitivity k-factor associated with element for integration
errkfact=thiselemdata.errkfact
mass=thiselemdata.mass
# full peak width in keV from SEMquantparams (usually 0.15keV or 15 channels at 0.1eV/chan)
width=int(((thiselemdata.fullwidth*100)-1)/2) # integration width in channels for direct integration for this element
# total # of channels in AESquantparams but include n-1/2 channels on either side of peak center (usually width is 8 channels)
#Elemdata is a list (of length number of elements) containing length5 tuples
elemtuple=(elem, idealindex, width, kfact, errkfact, mass) # add tuple with info for this element
Elemdata.append(elemtuple) # now contains proper limits on fitting regions
except:
print('Quant parameters are not properly loaded.')
return Elemdata
def fitgauss(df, halfwidth, elem, SEMfileName, savegauss=True):
''' Gaussian fit of direct peaks (pass SEMfile just around peaks region
no need to save Gaussian fit, just return width and other params
integwidth pass from AESquantparams value'''
# Remove any nan values from peak region (shouldn't be any though)
df=df.dropna(subset=['Subdata']) # remove nan entries from peak
# Estimate initial Gaussian parameters from data
xc=df['Subdata'].idxmax() # estimate center based on peak max index
xc=df.loc[xc]['Energy'] # associated energy value near center
peakarea=df['Subdata'].sum() # decent area estimate
y0=0 #
width=0.01*(2*halfwidth+1) # full width estimate in keV from half-width in channels
params0=[xc,width,peakarea,y0] # initial params list (first guess at gaussian params)
xcol=df['Energy']
ycol=df['Subdata']
xcol=xcol.as_matrix() # convert both to numpy matrices
ycol=ycol.as_matrix()
# define standard gaussian funct (xc, width, area and yoffset are init params)
gaussian=lambda params, x: params[3]+params[2]/(params[1]*np.sqrt(2*np.pi))*np.exp(-((x-params[0])**2/(2*params[1]**2)))
# thisgauss= gaussian(params0,xcol)
errfunc=lambda p, xcol, ycol: ycol- gaussian(p,xcol) # lambda error funct definition
# sigma2FWHM = lambda sigma: sigma * sqrt(2 * log(2)) * 2 / sqrt(2) # convert Gaussian widths to FWHM?
try:
fitparams, cov, infodict, mesg, ier =optimize.leastsq(errfunc,params0,args=(xcol,ycol),full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((ycol-ycol.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
except: # fitting problem
print('Gaussian fitting error for', elem, ' peak in file ', SEMfileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
if savegauss==True:
df['Gauss']='' # add col for gaussian fit
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=fitparams[3]+fitparams[2]/(fitparams[1]*np.sqrt(2*np.pi))*np.exp(-((xval-fitparams[0])**2/(2*fitparams[1]**2)))
df.set_value(index,'Gauss',yval)
return df, fitparams, rsquared, ier
def fitpeaks(SEMfile, Elemdata, logmatch, savegauss=True):
''' Gaussian fit of major peaks in single spectrum, shift is list of energy shifts of negpeak (same order as Eledata (opens source spectrum as SEMfile,
fits peak backgrounds above and below using Elemdata, also saves linear fit params to logdataframe with position/amplitude/etc;
desired elements out of data range are skipped (in prior findindices function)
# Saving of gaussian fits of peaks could be stored as separate csv if this was ever desired... probably not
'''
SEMfileName=logmatch.Filename # only used for error reporting
# Create temp df to hold and pass linear fit data
mycols=['Basename', 'Filenumber', 'Point','Filename', 'Filepath', 'Sample', 'Comments', 'Element',
'Xc', 'Width', 'Peakarea', 'Y0','Rsquared'] # for gaussian peak fits
mycols2=['Basename','Filenumber', 'Point', 'Filename', 'Filepath', 'Sample', 'Comments', 'Element', 'Energy', 'Shift', 'Rawcounts',
'Backcounts', 'Subtractedcounts', 'Adj counts', '% err', 'Significance' , 'Basis', 'Errbasis',] # for integration results
Peakfits=pd.DataFrame(columns=mycols) # blank df for this spectrum's peak fits
Integresults=pd.DataFrame(columns=mycols2) # blank df for this spectrum's integration results
# fit all elemental peaks with gaussian, determine shift and perform integration (incl. error)
for i, (elem, idealindex, halfwidth, kfact, errkfact, mass) in enumerate(Elemdata):
Peakfitrow=pd.DataFrame(index=np.arange(0,1),columns=mycols) # single dataframe row for this
Integresultrow=pd.DataFrame(index=np.arange(0,1),columns=mycols2) # blank df row
# linear fit below this elem's peak (shifts and adjustments already made)
# use 10 more channels than those used for integration for gaussian fits
fitregion=SEMfile[idealindex-halfwidth-5:idealindex+halfwidth+6]
if fitregion.empty==True: # skip if no data present (peak out of range problem)
continue
# Gaussian fit of subtracted data peaks > 50 cnts
if fitregion['Subdata'].max()>50:
fitregion, fitparams, rsquared, ier = fitgauss(fitregion, halfwidth, elem, SEMfileName, savegauss=True)
if savegauss==True: # save Gaussian peaks as separate column
if 'Gauss' not in SEMfile.dtypes.index: # add col if not already present
SEMfile['Gauss']='' # add blank col for gaussian fit if not present
# copy gaussian fit to Augerfile... fitregion only modified in new Gauss peak fit column
SEMfile.loc[fitregion.index,fitregion.columns]=fitregion
# determination of peak shift
# If gaussian fit is successful set center integration channel to index nearest xc
# ier flag of 1,2,3,4 if fit succeeds but rsquared threshold is better
if rsquared!='n/a': # somewhat successful gaussian fit
if rsquared>0.4:
xc=fitparams[0] # center of gaussian fit in keV
centerindex=int((xc+.01)*100)
shift= centerindex- idealindex # energy shift in channels
if abs(shift)>3:
print('Warning: Gaussian shift of ', str(shift), ' channels indicated for ', elem, ' in ', SEMfileName)
if shift>0: # keep peak shift the same but only allow 3 channel shift in integration window
centerindex=idealindex+3 # set to max shift
else:
centerindex=idealindex-3
# TODO Maybe a better way of setting maximum allowable shift
else:
print('Low quality gaussian fit for ', elem, ' in ', SEMfileName)
centerindex=idealindex # already stores index number of central peak (ideal - sm-diff shift value)
shift='n/a'
# Write gaussian fit params to peakfit (eventually copied to peakfitlog)
else: # Fit attempted but failed result
print ('Fit attempted but result failed for ', elem, ' in ', SEMfileName)
fitparams=['n/a','n/a','n/a','n/a']
rsquared='n/a'
else: # indication of failed Gaussian fit (use prior knowledge of peak position)
print('Skip gaussian fit of tiny ', elem, ' peak in ', SEMfileName)
# set center integration channel to value passed by integpeak
# this is ideal energy value but adjusted by shift found using smooth-diff quant method
centerindex=idealindex # already stores index number of central peak (ideal - sm-diff shift value)
shift='n/a'
fitparams=['n/a','n/a','n/a','n/a']
rsquared='n/a'
# Perform integration over peak center channel + integwidth on either side
SEMpeak=SEMfile[centerindex-halfwidth:centerindex+halfwidth+1]
integcounts=SEMpeak['Subdata'].sum() # get counts sum
backgroundcnts=SEMpeak['Backfit'].sum() # sum counts over identical width in background fit
# Used for peak significance i.e. typically 2 sigma of background integration over identical width
# full integ width is 1.2*FWHM but integwidth here is closest integer half-width
# end of element loop
Peakfitrow.loc[0]['Element']=elem
Peakfitrow.loc[0]['Xc']=fitparams[0]
Peakfitrow.loc[0]['Width']=fitparams[1]
Peakfitrow.loc[0]['Peakarea']=fitparams[2]
Peakfitrow.loc[0]['Y0']=fitparams[3]
Peakfitrow.loc[0]['Rsquared']=rsquared
Peakfits=pd.concat([Peakfits, Peakfitrow], ignore_index=True) # copy peak rows individually to df
# Copy integration results for this peak into df row
Integresultrow.iloc[0]['Element']=elem
Integresultrow.iloc[0]['Energy']=centerindex # index of center as determined by fitting (if successful)
Integresultrow.iloc[0]['Shift']=shift # energy shift from ideal in channels (0.01 eV)
Integresultrow.iloc[0]['Rawcounts']=SEMpeak['Counts'].sum()
Integresultrow.iloc[0]['Backcounts']=backgroundcnts
Integresultrow.iloc[0]['Subtractedcounts']=integcounts
# Adjusted counts must be determined later for pathological overlaps
# 2 sigma err due to counting statistics
Integresultrow.iloc[0]['% err']=round(2/np.sqrt(integcounts),3)
Integresultrow.iloc[0]['Significance']=round(integcounts/(np.sqrt(backgroundcnts)),3)
# TODO add 2/sqrt(n) calc of associated percent error (also can calculate later)
Integresultrow.iloc[0]['Basis']=integcounts*kfact/mass
# Calculated combined error for 2sig counting stats + loaded k-factor error
comberr=np.sqrt(errkfact**2+(2/np.sqrt(integcounts))**2)
# calculate error in basis for given elemental peak
Integresultrow.iloc[0]['Errbasis']=(integcounts*kfact/mass)*comberr
Integresults=pd.concat([Integresults,Integresultrow], ignore_index=True)
# assign params that are common to this spectrum (all elemental peaks)
for index,row in Peakfits.iterrows():
Peakfits.loc[index]['Filenumber']=logmatch.Filenumber
Peakfits.loc[index]['Basename']=logmatch.Basename
Peakfits.loc[index]['Filename']=logmatch.Filename
Peakfits.loc[index]['Point']=logmatch.Point
Peakfits.loc[index]['Filepath']=logmatch.FilePath
Peakfits.loc[index]['Sample']=logmatch.Sample
Peakfits.loc[index]['Comments']=logmatch.Comments
for index,row in Integresults.iterrows(): # assign
Integresults.loc[index]['Filenumber']=logmatch.Filenumber
Integresults.loc[index]['Filename']=logmatch.Filename
Integresults.loc[index]['Basename']=logmatch.Basename
Integresults.loc[index]['Point']=logmatch.Point
Integresults.loc[index]['Filepath']=logmatch.FilePath
Integresults.loc[index]['Sample']=logmatch.Sample
Integresults.loc[index]['Comments']=logmatch.Comments
Peakfits=Peakfits[mycols] # put back in original order
Integresults=Integresults[mycols2] # put back in original order
return SEMfile, Peakfits, Integresults # df with direct peak fitting info for all areas/ all elements
def fitbackgrounds(SEMfile, Fitregions, logmatch, oddspectrum=False):
''' Background fit for each direct peak(opens source spectrum as SEMfile,
fits peak backgrounds above and below using Elemdata, saves background to source csv (overwrites existing fits),
also saves linear fit params to logdataframe with position/amplitude/etc;
Fitregions stores total ev range, background regions, fit type and thresholdfor deriv knockout '''
# Create temp df to hold and pass linear fit data
SEMfileName=logmatch.Filename #
mycols=['Basename', 'Filenumber', 'Filename', 'FilePath', 'Sample', 'Comments', 'Date', 'Point', 'Beamkv',
'Livetime','Timeconst','Deadfraction','Fitrange', 'Fittype', 'A', 'B', 'C', 'D', 'Rval', 'Pval', 'Stderr']
Backfitparams=pd.DataFrame(columns=mycols) # empty df to hold all rows from this spectrum
# all fit regions modify fit region boundaries for this spectrum based on smooth-differentiated peak (2nd deriv, Savgol (poly=2, pts=11))
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
# already incorporated into Elemdata values (lower1,2 and upper1,2 fully adjusted)
# loop through and fit all peaks for each element in this spatial area
for i, [fitrange, fitregs, fittype, threshold] in enumerate(Fitregions):
# create new df row for each fitted range
Backfitparamrow=pd.DataFrame(index=np.arange(0,1),columns=mycols)
# modify fit region for this spectrum (eliminate those with high derivative )
# Threshold level defined by many attempted fits to actual data
Thisbackfit=findfitregion(SEMfile, fitregs, threshold)
# Force counts to zero near origin
for index,row in Thisbackfit.iterrows():
if index < 5:
Thisbackfit=Thisbackfit.set_value(index,'Counts',0)
# now do parabolic fit over this region (return df with backfit col)
if fittype=='parabola':
Thisbackfit, fitparams = fitparabola(Thisbackfit, SEMfileName)
# unpack polynomial fit parameters
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
# now copy this function over entire range of fit
lower=int(fitrange.split('-')[0])
upper=int(fitrange.split('-')[1])
# TODO these are essentially index numbers (since energy in eV is nearly same as index range)
if A!='n/a': # test for successful fit (all fitparams set to n/a)
for i in range(lower,upper):
xval=SEMfile.iloc[i]['Energy']
SEMfile.set_value(i,'Backfit',A * xval**2 + B * xval + C) # just set values directly from fit results
        # now store values for this df row (slower but easy)
Backfitparamrow.iloc[0]['Fitrange'] = fitrange
Backfitparamrow.iloc[0]['Fittype'] = fittype
Backfitparamrow.iloc[0]['A'] = A # parabolic fit params
Backfitparamrow.iloc[0]['B'] = B
Backfitparamrow.iloc[0]['C'] = C
# TODO test and incorporate other fit types
# Set value for subtracted spectral data (nan if nothing in backfit)
SEMfile['Subdata']=SEMfile['Counts']-SEMfile['Backfit']
        # concatenate single row with log
Backfitparams=pd.concat([Backfitparams, Backfitparamrow], ignore_index=True)
# END OF BACKGROUND FITTING LOOP FOR SINGLE SPECTRUM
# create subtracted peak for entire spectrum
# assign params that are common to all areas/all peaks into rows of df (copied from original log)
    for index, row in Backfitparams.iterrows():
        # label-based .loc[row, col] assignment writes values back to the frame
        Backfitparams.loc[index,'Basename']=logmatch.Basename
        Backfitparams.loc[index,'Filenumber']=logmatch.Filenumber
        Backfitparams.loc[index,'Filename']=logmatch.Filename
        Backfitparams.loc[index,'FilePath']=logmatch.FilePath
        Backfitparams.loc[index,'Sample']=logmatch.Sample
        Backfitparams.loc[index,'Point']=logmatch.Point
        Backfitparams.loc[index,'Comments']=logmatch.Comments
        Backfitparams.loc[index,'Date']=logmatch.Date
        Backfitparams.loc[index,'Beamkv']=logmatch.Beamkv
        Backfitparams.loc[index,'Livetime']=logmatch.Livetime
        Backfitparams.loc[index,'Timeconst']=logmatch.Timeconst
        Backfitparams.loc[index,'Deadfraction']=logmatch.Deadfraction
Backfitparams=Backfitparams[mycols] # put back in original order
return SEMfile, Backfitparams # df with direct peak fitting info for all areas/ all elements
def batchSEMquant(SEMfiles, Fitregionsdf, SEMquantparams, Elements, overwrite=True, savegauss=True):
''' Batch quantification of all peaks in Elements list
returns df with peak positions, amplitudes, width, energy shift, etc. '''
# create empty dataframe for storing/passing linear fit params (same structure as in fitbackgrounds)
mycols=['Basename', 'Filenumber', 'Filename', 'FilePath', 'Sample', 'Comments', 'Date', 'Point', 'Beamkv',
'Livetime','Timeconst','Deadfraction','Fitrange', 'Fittype', 'A', 'B', 'C', 'D', 'Rval', 'Pval', 'Stderr']
Backfitparamslog=pd.DataFrame(columns=mycols) # empty df to hold all fits/ all spectra
    # blank df for gaussian peak fit results
mycols2=['Basename', 'Filenumber', 'Point','Filename', 'Filepath', 'Sample', 'Comments', 'Element',
'Xc', 'Width', 'Peakarea', 'Y0','Rsquared'] # for gaussian peak fits
Peakfitlog=pd.DataFrame(columns=mycols2)
# Now a blank frame for integrated quant results
mycols3=['Basename','Filenumber', 'Point', 'Filename', 'Filepath', 'Sample', 'Comments', 'Element', 'Energy', 'Shift', 'Rawcounts',
'Backcounts', 'Subtractedcounts', 'Adj counts', '% err', 'Significance' , 'Basis', 'Errbasis',] # for integration results
    Integquantlog=pd.DataFrame(columns=mycols3)
"""
Evaluate vega expressions language
"""
import datetime as dt
from functools import reduce, wraps
import itertools
import math
import operator
import random
import sys
import time as timemod
from typing import Any, Callable, Dict, Optional, List, Union, overload
import numpy as np
import pandas as pd
from dateutil import tz
from altair_transform.utils import evaljs, undefined, JSRegex
def eval_vegajs(expression: str, datum: pd.DataFrame = None) -> pd.DataFrame:
"""Evaluate a vega expression"""
namespace = {"datum": datum} if datum is not None else {}
namespace.update(VEGAJS_NAMESPACE)
return evaljs(expression, namespace)
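# Hedged usage sketch for eval_vegajs (the column name "x" and the expression are
# illustrative assumptions, not taken from this module): field references in the
# expression resolve through the evaljs namespace built above, e.g.
#
#     df = pd.DataFrame({"x": [1, 2, 3]})
#     eval_vegajs("2 * datum.x", df)   # expected to yield the doubled column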
def vectorize(func: Callable) -> Callable:
@wraps(func)
def wrapper(*args, **kwargs):
series_args = [
arg
for arg in itertools.chain(args, kwargs.values())
if isinstance(arg, pd.Series)
]
if not series_args:
return func(*args, **kwargs)
else:
index = reduce(operator.or_, [s.index for s in series_args])
def _get(x, i):
return x.get(i, math.nan) if isinstance(x, pd.Series) else x
return pd.Series(
[
func(
*(_get(arg, i) for arg in args),
**{k: _get(v, i) for k, v in kwargs.items()},
)
for i in index
],
index=index,
)
if hasattr(func, "__annotations__"):
wrapper.__annotations__ = {
key: Union[pd.Series, val] for key, val in func.__annotations__.items()
}
return wrapper
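# Minimal sketch of what @vectorize does (values are illustrative; the predicates
# referenced here are defined later in this module): a wrapped scalar function is
# applied element-wise whenever any argument is a pandas Series, and falls through
# to the plain scalar call otherwise, e.g.
#
#     isString(pd.Series(["a", 1, "b"]))   # -> Series([True, False, True])
#     isString("a")                        # -> True (scalar path, no Series argument)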
# Type Checking Functions
@vectorize
def isArray(value: Any) -> bool:
"""Returns true if value is an array, false otherwise."""
return isinstance(value, (list, np.ndarray))
@vectorize
def isBoolean(value: Any) -> bool:
"""Returns true if value is a boolean (true or false), false otherwise."""
return isinstance(value, (bool, np.bool_))
@vectorize
def isDate(value: Any) -> bool:
"""Returns true if value is a Date object, false otherwise.
This method will return false for timestamp numbers or
date-formatted strings; it recognizes Date objects only.
"""
return isinstance(value, dt.datetime)
@vectorize
def isDefined(value: Any) -> bool:
"""Returns true if value is a defined value, false if value equals undefined.
This method will return true for null and NaN values.
"""
# TODO: support implicitly undefined values?
return value is not undefined
@vectorize
def isNumber(value: Any) -> bool:
"""Returns true if value is a number, false otherwise.
NaN and Infinity are considered numbers.
"""
return np.issubdtype(type(value), np.number)
@vectorize
def isObject(value: Any) -> bool:
"""Returns true if value is an object, false otherwise.
Following JavaScript typeof convention, null values are considered objects.
"""
return value is None or isinstance(value, dict)
@vectorize
def isRegExp(value: Any) -> bool:
"""
Returns true if value is a RegExp (regular expression)
object, false otherwise.
"""
return isinstance(value, JSRegex)
@vectorize
def isString(value: Any) -> bool:
"""Returns true if value is a string, false otherwise."""
return isinstance(value, str)
@vectorize
def isValid(value: Any) -> bool:
"""Returns true if value is not null, undefined, or NaN."""
return not (value is None or value is undefined or pd.isna(value))
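# Quick reference for isValid (illustrative scalar calls):
#     isValid(0)             # -> True  (zero is a valid value)
#     isValid(float("nan"))  # -> False (NaN is rejected via pd.isna)
#     isValid(None)          # -> False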
# Type Coercion Functions
@vectorize
def toBoolean(value: Any) -> bool:
"""
Coerces the input value to a boolean.
Null values and empty strings are mapped to null.
"""
return bool(value)
@vectorize
def toDate(value: Any) -> Optional[float]:
"""
Coerces the input value to a Date instance.
Null values and empty strings are mapped to null.
If an optional parser function is provided, it is used to
perform date parsing, otherwise Date.parse is used.
"""
if isinstance(value, (float, int)):
return value
if value is None or value == "":
return None
    return pd.to_datetime(value)
"""
Tests the financial data structures
"""
import unittest
import os
import numpy as np
import pandas as pd
from mlfinlab.data_structures import imbalance_data_structures as ds
class TestDataStructures(unittest.TestCase):
"""
Test the various financial data structures:
1. Imbalance Dollar bars
2. Imbalance Volume bars
3. Imbalance Tick bars
"""
def setUp(self):
"""
Set the file path for the tick data csv
"""
project_path = os.path.dirname(__file__)
self.path = project_path + '/test_data/imbalance_sample_data.csv'
def test_imbalance_dollar_bars(self):
"""
Tests the imbalance dollar bars implementation.
"""
exp_num_ticks_init = 1000
num_prev_bars = 3
db1 = ds.get_dollar_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=2e7, verbose=False)
db2 = ds.get_dollar_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=50, verbose=False)
db3 = ds.get_dollar_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
ds.get_dollar_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db4 = pd.read_csv('test.csv')
# Assert diff batch sizes have same number of bars
self.assertTrue(db1.shape == db2.shape)
self.assertTrue(db1.shape == db3.shape)
self.assertTrue(db1.shape == db4.shape)
# Assert same values
self.assertTrue(np.all(db1.values == db2.values))
self.assertTrue(np.all(db1.values == db3.values))
self.assertTrue(np.all(db1.values == db4.values))
# Assert OHLC is correct
self.assertTrue(db1.loc[0, 'open'] == 1306.0)
self.assertTrue(db1.loc[0, 'high'] == 1306.00)
self.assertTrue(db1.loc[0, 'low'] == 1304.25)
self.assertTrue(db1.loc[0, 'close'] == 1304.5)
self.assertTrue((db1.loc[:, 'high'] >= db1.loc[:, 'low']).all())
# delete generated csv file (if it wasn't generated test would fail)
os.remove('test.csv')
def test_imbalance_volume_bars(self):
"""
Tests the imbalance volume bars implementation.
"""
exp_num_ticks_init = 100
num_prev_bars = 3
db1 = ds.get_volume_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=1000, verbose=False)
db2 = ds.get_volume_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=50, verbose=False)
db3 = ds.get_volume_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=10, verbose=False)
ds.get_volume_imbalance_bars(self.path, exp_num_ticks_init=exp_num_ticks_init,
num_prev_bars=num_prev_bars, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
        db4 = pd.read_csv('test.csv')
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import Polygon
import matplotlib
from utils import load_obj
show_plot = False
cols = ["dataset", "period", "clf", "magic", "model_params", "k", "bot_thresh",
"top_thresh", "mode", "trade_frequency", "start_trade", "final_trade",
"time", "min", "max", "mean", "last"]
def add_legend(fig):
# Finally, add a basic legend
fig.text(0.8005, 0.135, '-', color='red', backgroundcolor='silver',
weight='roman', size='medium')
fig.text(0.825, 0.135, ' S&P 500 Index', color='black',
weight='roman',
size='x-small')
fig.text(0.8005, 0.17, '*', color='white', backgroundcolor='silver',
weight='roman', size='medium')
fig.text(0.825, 0.17, ' Average Value', color='black', weight='roman',
size='x-small')
def plot_by_k(results):
# plot by model
ks = [10, 25, 50]
data = [results[pd.to_numeric(results.k) == k]['last'].values / 1e6 for k in ks]
k_names = [20, 50, 100]
fig, ax1 = plt.subplots(figsize=(5, 9))
fig.canvas.set_window_title('Revenues per different portfolio sizes')
fig.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
plt.setp(bp['medians'], color='black')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.ticklabel_format(axis='y', style='plain')
ax1.set_title('Comparison of total revenues by portfolio sizes')
ax1.set_xlabel('Portfolio size')
ax1.set_ylabel('Total revenue in million U.S. dollars')
ax1.axhline(y=276480 / 1e6, color='red', linestyle='--', alpha=0.4)
# Now fill the boxes with desired colors
boxColors = ['royalblue', 'royalblue']
numBoxes = len(data)
medians = list(range(numBoxes))
for i in range(numBoxes):
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
k = i % 2
boxCoords = np.column_stack([boxX, boxY])
boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
# ax1.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
ax1.plot([np.average(med.get_xdata())], [np.average(data[i])],
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = max([max(x) for x in data if len(x) > 0]) * 1.1
bottom = min([min(x) for x in data if len(x) > 0]) * 5
ax1.set_ylim(-1, top)
ax1.set_xticklabels(k_names, fontsize=10)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(numBoxes) + 1
upperLabels = [str(np.round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(numBoxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], top - (top * 0.05), upperLabels[tick],
horizontalalignment='center', size='x-small',
weight=weights[k],
color=boxColors[k])
add_legend(fig)
plt.gcf().subplots_adjust(left=0.1)
# plt.tight_layout()
plt.savefig('ks', bbox_inches='tight')
if show_plot:
plt.show()
def plot_by_model(results):
# plot by model
models = ['SVR', 'RFR', 'MLPR', 'AdaBR']
data = [results[results.clf == clf]['last'].values / 1e6 for clf in models]
model_names = ['SVM', 'Random Forest', 'Neural Network',
'AdaBoost']
fig, ax1 = plt.subplots(figsize=(7, 9))
fig.canvas.set_window_title('Revenues per model')
fig.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
plt.setp(bp['medians'], color='black')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.ticklabel_format(axis='y', style='plain')
ax1.set_title('Comparison of total revenues for different models')
ax1.set_xlabel('Models')
ax1.set_ylabel('Total revenue in million U.S. dollars')
ax1.axhline(y=276480 / 1e6, color='red', linestyle='--', alpha=0.4)
# Now fill the boxes with desired colors
boxColors = ['royalblue', 'royalblue']
numBoxes = len(data)
medians = list(range(numBoxes))
for i in range(numBoxes):
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
k = i % 2
boxCoords = np.column_stack([boxX, boxY])
boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
# ax1.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
ax1.plot([np.average(med.get_xdata())], [np.average(data[i])],
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = max([max(x) for x in data if len(x) > 0]) * 1.1
bottom = min([min(x) for x in data if len(x) > 0]) * 5
ax1.set_ylim(-1, top)
ax1.set_xticklabels(model_names, fontsize=10)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(numBoxes) + 1
upperLabels = [str(np.round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(numBoxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], top - (top * 0.05), upperLabels[tick],
horizontalalignment='center', size='x-small',
weight=weights[k],
color=boxColors[k])
add_legend(fig)
plt.gcf().subplots_adjust(left=0.1)
# plt.tight_layout()
plt.savefig('models', bbox_inches='tight')
if show_plot:
plt.show()
def to_df_col(pfs, name):
index = [p._day_str for p in pfs]
data = [(p.total_money - p.fees) / 1e6 for p in pfs]
return pd.DataFrame(data=data, index=index, columns=[name])
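# Hedged usage sketch for to_df_col (assumes the portfolio snapshots in `pfs` expose
# _day_str, total_money and fees, as accessed above): it returns a one-column frame
# of portfolio value in millions, indexed by trading day, e.g.
#
#     svm_curve = to_df_col(pfs, 'SVM')   # column 'SVM', one row per trading day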
def get_trends_df(results):
best = results.groupby('clf')[['last']].max()
worst = results.groupby('clf')[['last']].min()
    sp500 = pd.read_csv('../sp500.csv')
import teneto
import tvc_benchmarker
import numpy as np
import pandas as pd
def dfc_calc(data,methods=['SW','TSW','SD','JC','TD'],sw_window=63,taper_name='norm',taper_properties=[0,10],sd_distance='euclidean',mtd_window=7,mi='alpha',colind=None):
"""
Required parameters for the various differnet methods:
If method == 'SW'
sw_window = [Integer]
Length of sliding window
If method == 'TSW'
sw_window = [Integer]
Length of sliding window
taper_name = [string]
Name of scipy.stats distribution used (see teneto.derive.derive for more information)
taper_properties = [list]
List of the different scipy.stats.[taper_name] properties. E.g. if taper_name = 'norm'; taper_properties = [0,10] with me the mean and standard deviation of the distribution.
If method == 'SD'
sd_distance = [string]
            Distance function used to calculate the similarity between time-points. Can be any of the distance functions in scipy.spatial.distance.
if method == 'JC'
        There are no parameters; pass an empty dictionary as parameter input.
if method == 'MTD'
mtd_window= [Integer]
Length of window
# mi='alpha'
"""
    # If data is a string, load precalculated data
if isinstance(data, str):
if data == 'sim-1' and not colind:
colind = 1
elif (data == 'sim-2' or data == 'sim-3' or data == 'sim-4') and not colind:
colind = 2
elif colind:
pass
else:
raise ValueError('unknown simulation. Input must be "sim-1", "sim-2", "sim-3" or "sim-4"')
df = pd.read_csv(tvc_benchmarker.__path__[0] + '/data/dfc/' + data + '_dfc.csv',index_col=np.arange(0,colind))
# Get methods
requested_methods = list(set(methods).intersection(df.columns))
        df = df[requested_methods]
#Otherwise calculate
else:
# Make methods variable a list if single string is given
if isinstance(methods,str):
methods = [methods]
if isinstance(mi,str):
mi = [mi]
dfc={}
params = {}
for m in mi:
params[m] = np.unique(data.index.get_level_values(m))
mi,mi_num,mi_parameters,mi_param_list = tvc_benchmarker.multiindex_preproc(params,mi)
#Sliding window
if 'SW' in methods:
dfc['SW'] = []
dfc_params={}
dfc_params['windowsize']=sw_window
dfc_params['method'] = 'slidingwindow'
dfc_params['dimord'] = 'node,time'
dfc_params['postpro'] = 'fisher'
dfc_params['report'] = 'no'
if mi_parameters[0]:
# Do this if there are multiple mi parameters
for sim_it, mi_params in enumerate(mi_parameters):
ts1 = data['timeseries_1'][mi_params]
ts2 = data['timeseries_2'][mi_params]
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['SW'].append(np.lib.pad(connectivity,int((sw_window-1)/2),mode='constant',constant_values=np.nan))
# Otherwise do this
else:
ts1 = data['timeseries_1']
ts2 = data['timeseries_2']
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['SW'].append(np.lib.pad(connectivity,int((sw_window-1)/2),mode='constant',constant_values=np.nan))
# Line up all appened arrays
dfc['SW'] = np.concatenate(dfc['SW'])
#Tapered sliding window
if 'TSW' in methods:
dfc['TSW'] = []
dfc_params={}
dfc_params['windowsize']=sw_window
dfc_params['distribution']=taper_name
dfc_params['distribution_params']=taper_properties
dfc_params['method'] = 'taperedslidingwindow'
dfc_params['dimord'] = 'node,time'
dfc_params['postpro'] = 'fisher'
dfc_params['report'] = 'no'
if mi_parameters[0]:
# Do this if there are multiple mi parameters
for sim_it, mi_params in enumerate(mi_parameters):
ts1 = data['timeseries_1'][mi_params]
ts2 = data['timeseries_2'][mi_params]
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['TSW'].append(np.lib.pad(connectivity,int((sw_window-1)/2),mode='constant',constant_values=np.nan))
# Otherwise do this
else:
ts1 = data['timeseries_1']
ts2 = data['timeseries_2']
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['TSW'].append(np.lib.pad(connectivity,int((sw_window-1)/2),mode='constant',constant_values=np.nan))
# Line up all appened arrays
dfc['TSW'] = np.concatenate(dfc['TSW'])
#Spatial distance
if 'SD' in methods:
dfc['SD'] = []
dfc_params={}
dfc_params['distance']='euclidean'
dfc_params['method'] = 'spatialdistance'
dfc_params['dimord'] = 'node,time'
dfc_params['postpro'] = 'fisher'
dfc_params['report'] = 'no'
if mi_parameters[0]:
# Do this if there are multiple mi parameters
for sim_it, mi_params in enumerate(mi_parameters):
ts1 = data['timeseries_1'][mi_params]
ts2 = data['timeseries_2'][mi_params]
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['SD'].append(connectivity)
# Otherwise do this
else:
ts1 = data['timeseries_1']
ts2 = data['timeseries_2']
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['SD'].append(connectivity)
# Line up all appened arrays
dfc['SD'] = np.concatenate(dfc['SD'])
#Jackknife
if 'JC' in methods:
dfc['JC'] = []
dfc_params={}
dfc_params['method'] = 'jackknife'
dfc_params['dimord'] = 'node,time'
dfc_params['postpro'] = 'fisher'
dfc_params['report'] = 'no'
if mi_parameters[0]:
# Do this if there are multiple mi parameters
for sim_it, mi_params in enumerate(mi_parameters):
ts1 = data['timeseries_1'][mi_params]
ts2 = data['timeseries_2'][mi_params]
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['JC'].append(connectivity)
# Otherwise do this
else:
ts1 = data['timeseries_1']
ts2 = data['timeseries_2']
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['JC'].append(connectivity)
# Line up all appened arrays
dfc['JC'] = np.concatenate(dfc['JC'])
#Temporal derivative
        if 'TD' in methods or 'MTD' in methods: # temporal derivative (default methods list uses 'TD')
dfc['TD'] = []
dfc_params={}
dfc_params['method'] = 'mtd'
dfc_params['dimord'] = 'node,time'
dfc_params['postpro'] = 'no'
dfc_params['windowsize'] = mtd_window
dfc_params['report'] = 'no'
if mi_parameters[0]:
# Do this if there are multiple mi parameters
for sim_it, mi_params in enumerate(mi_parameters):
ts1 = data['timeseries_1'][mi_params]
ts2 = data['timeseries_2'][mi_params]
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['TD'].append(np.lib.pad(np.hstack([np.nan,connectivity]),int((mtd_window-1)/2),mode='constant',constant_values=np.nan))
# Otherwise do this
else:
ts1 = data['timeseries_1']
ts2 = data['timeseries_2']
connectivity = teneto.derive.derive(np.array([ts1,ts2]),dfc_params)[0,1,:]
dfc['TD'].append(np.lib.pad(np.hstack([np.nan,connectivity]),int((mtd_window-1)/2),mode='constant',constant_values=np.nan))
# Line up all appened arrays
dfc['TD'] = np.concatenate(dfc['TD'])
        df = pd.DataFrame(data=dfc, index=data.index)
""" io_utils.py
Utilities for reading and writing logs.
"""
import os
import statistics
import re
import csv
import pickle

import numpy as np
import pandas as pd
import scipy as sc
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
import tensorboardX
import cv2

import torch
import torch.nn as nn
from torch.autograd import Variable
# Only necessary to rebuild the Chemistry example
# from rdkit import Chem
import utils.featgen as featgen
use_cuda = torch.cuda.is_available()
def gen_prefix(args):
'''Generate label prefix for a graph model.
'''
if args.bmname is not None:
name = args.bmname
else:
name = args.dataset
name += "_" + args.method
name += "_h" + str(args.hidden_dim) + "_o" + str(args.output_dim)
if not args.bias:
name += "_nobias"
if len(args.name_suffix) > 0:
name += "_" + args.name_suffix
return name
def gen_explainer_prefix(args):
'''Generate label prefix for a graph explainer model.
'''
name = gen_prefix(args) + "_explain"
if len(args.explainer_suffix) > 0:
name += "_" + args.explainer_suffix
return name
def create_filename(save_dir, args, isbest=False, num_epochs=-1):
"""
Args:
args : the arguments parsed in the parser
isbest : whether the saved model is the best-performing one
num_epochs : epoch number of the model (when isbest=False)
"""
filename = os.path.join(save_dir, gen_prefix(args))
os.makedirs(filename, exist_ok=True)
if isbest:
filename = os.path.join(filename, "best")
elif num_epochs > 0:
filename = os.path.join(filename, str(num_epochs))
return filename + ".pth.tar"
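# Worked example (hypothetical args, for illustration only): with args describing
# dataset='syn1', method='base', hidden_dim=20, output_dim=20, bias=True and no
# bmname or suffixes, create_filename('ckpt', args, num_epochs=100) resolves to
# 'ckpt/syn1_base_h20_o20/100.pth.tar'.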
def save_checkpoint(model, optimizer, args, num_epochs=-1, isbest=False, cg_dict=None):
"""Save pytorch model checkpoint.
Args:
- model : The PyTorch model to save.
- optimizer : The optimizer used to train the model.
- args : A dict of meta-data about the model.
- num_epochs : Number of training epochs.
- isbest : True if the model has the highest accuracy so far.
- cg_dict : A dictionary of the sampled computation graphs.
"""
filename = create_filename(args.ckptdir, args, isbest, num_epochs=num_epochs)
torch.save(
{
"epoch": num_epochs,
"model_type": args.method,
"optimizer": optimizer,
"model_state": model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"cg": cg_dict,
},
filename,
)
def load_ckpt(args, isbest=False):
'''Load a pre-trained pytorch model from checkpoint.
'''
print("loading model")
filename = create_filename(args.ckptdir, args, isbest)
print(filename)
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
ckpt = torch.load(filename)
else:
print("Checkpoint does not exist!")
print("Checked path -- {}".format(filename))
print("Make sure you have provided the correct path!")
print("You may have forgotten to train a model for this dataset.")
print()
print("To train one of the paper's models, run the following")
print(">> python train.py --dataset=DATASET_NAME")
print()
raise Exception("File not found.")
return ckpt
def preprocess_cg(cg):
"""Pre-process computation graph."""
if use_cuda:
preprocessed_cg_tensor = torch.from_numpy(cg).cuda()
else:
preprocessed_cg_tensor = torch.from_numpy(cg)
preprocessed_cg_tensor.unsqueeze_(0)
return Variable(preprocessed_cg_tensor, requires_grad=False)
def load_model(path):
"""Load a pytorch model."""
model = torch.load(path)
model.eval()
if use_cuda:
model.cuda()
for p in model.features.parameters():
p.requires_grad = False
for p in model.classifier.parameters():
p.requires_grad = False
return model
def load_cg(path):
"""Load a computation graph."""
    cg = pickle.load(open(path, "rb"))
return cg
def save(mask_cg):
"""Save a rendering of the computation graph mask."""
mask = mask_cg.cpu().data.numpy()[0]
mask = np.transpose(mask, (1, 2, 0))
mask = (mask - np.min(mask)) / np.max(mask)
mask = 1 - mask
cv2.imwrite("mask.png", np.uint8(255 * mask))
def log_matrix(writer, mat, name, epoch, fig_size=(8, 6), dpi=200):
"""Save an image of a matrix to disk.
Args:
- writer : A file writer.
- mat : The matrix to write.
- name : Name of the file to save.
- epoch : Epoch number.
- fig_size : Size to of the figure to save.
- dpi : Resolution.
"""
plt.switch_backend("agg")
fig = plt.figure(figsize=fig_size, dpi=dpi)
mat = mat.cpu().detach().numpy()
if mat.ndim == 1:
mat = mat[:, np.newaxis]
plt.imshow(mat, cmap=plt.get_cmap("BuPu"))
cbar = plt.colorbar()
cbar.solids.set_edgecolor("face")
plt.tight_layout()
fig.canvas.draw()
writer.add_image(name, tensorboardX.utils.figure_to_image(fig), epoch)
def denoise_graph(adj, node_idx, feat=None, label=None, threshold=None, threshold_num=None, max_component=True):
"""Cleaning a graph by thresholding its node values.
Args:
- adj : Adjacency matrix.
- node_idx : Index of node to highlight (TODO ?)
- feat : An array of node features.
- label : A list of node labels.
- threshold : The weight threshold.
- theshold_num : The maximum number of nodes to threshold.
- max_component : TODO
"""
num_nodes = adj.shape[-1]
G = nx.Graph()
G.add_nodes_from(range(num_nodes))
G.nodes[node_idx]["self"] = 1
if feat is not None:
for node in G.nodes():
G.nodes[node]["feat"] = feat[node]
if label is not None:
for node in G.nodes():
G.nodes[node]["label"] = label[node]
if threshold_num is not None:
# this is for symmetric graphs: edges are repeated twice in adj
adj_threshold_num = threshold_num * 2
#adj += np.random.rand(adj.shape[0], adj.shape[1]) * 1e-4
neigh_size = len(adj[adj > 0])
threshold_num = min(neigh_size, adj_threshold_num)
threshold = np.sort(adj[adj > 0])[-threshold_num]
if threshold is not None:
weighted_edge_list = [
(i, j, adj[i, j])
for i in range(num_nodes)
for j in range(num_nodes)
if adj[i, j] >= threshold
]
else:
weighted_edge_list = [
(i, j, adj[i, j])
for i in range(num_nodes)
for j in range(num_nodes)
if adj[i, j] > 1e-6
]
G.add_weighted_edges_from(weighted_edge_list)
if max_component:
largest_cc = max(nx.connected_components(G), key=len)
G = G.subgraph(largest_cc).copy()
else:
# remove zero degree nodes
G.remove_nodes_from(list(nx.isolates(G)))
return G
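# Minimal usage sketch for denoise_graph (mask values are hypothetical): given a soft
# edge mask `adj` produced by the explainer, keep roughly the ten strongest edges
# around node 5 and only the largest connected component that survives:
#
#     G_denoised = denoise_graph(adj, node_idx=5, threshold_num=10)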
# TODO: unify log_graph and log_graph2
def log_graph(
writer,
Gc,
name,
identify_self=True,
nodecolor="label",
epoch=0,
fig_size=(4, 3),
dpi=300,
label_node_feat=False,
edge_vmax=None,
args=None,
):
"""
Args:
        nodecolor: the color of node, can be determined by 'label' or 'feat'. For 'feat', it needs to
            be one-hot.
"""
cmap = plt.get_cmap("Set1")
plt.switch_backend("agg")
fig = plt.figure(figsize=fig_size, dpi=dpi)
node_colors = []
# edge_colors = [min(max(w, 0.0), 1.0) for (u,v,w) in Gc.edges.data('weight', default=1)]
edge_colors = [w for (u, v, w) in Gc.edges.data("weight", default=1)]
# maximum value for node color
vmax = 8
for i in Gc.nodes():
if nodecolor == "feat" and "feat" in Gc.nodes[i]:
num_classes = Gc.nodes[i]["feat"].size()[0]
if num_classes >= 10:
cmap = plt.get_cmap("tab20")
vmax = 19
elif num_classes >= 8:
cmap = plt.get_cmap("tab10")
vmax = 9
break
feat_labels = {}
for i in Gc.nodes():
if identify_self and "self" in Gc.nodes[i]:
node_colors.append(0)
elif nodecolor == "label" and "label" in Gc.nodes[i]:
node_colors.append(Gc.nodes[i]["label"] + 1)
elif nodecolor == "feat" and "feat" in Gc.nodes[i]:
# print(Gc.nodes[i]['feat'])
feat = Gc.nodes[i]["feat"].detach().numpy()
# idx with pos val in 1D array
feat_class = 0
for j in range(len(feat)):
if feat[j] == 1:
feat_class = j
break
node_colors.append(feat_class)
feat_labels[i] = feat_class
else:
node_colors.append(1)
if not label_node_feat:
feat_labels = None
plt.switch_backend("agg")
fig = plt.figure(figsize=fig_size, dpi=dpi)
if Gc.number_of_nodes() == 0:
raise Exception("empty graph")
if Gc.number_of_edges() == 0:
raise Exception("empty edge")
# remove_nodes = []
# for u in Gc.nodes():
# if Gc
pos_layout = nx.kamada_kawai_layout(Gc, weight=None)
# pos_layout = nx.spring_layout(Gc, weight=None)
weights = [d for (u, v, d) in Gc.edges(data="weight", default=1)]
if edge_vmax is None:
edge_vmax = statistics.median_high(
[d for (u, v, d) in Gc.edges(data="weight", default=1)]
)
min_color = min([d for (u, v, d) in Gc.edges(data="weight", default=1)])
# color range: gray to black
edge_vmin = 2 * min_color - edge_vmax
nx.draw(
Gc,
pos=pos_layout,
with_labels=False,
font_size=4,
labels=feat_labels,
node_color=node_colors,
vmin=0,
vmax=vmax,
cmap=cmap,
edge_color=edge_colors,
edge_cmap=plt.get_cmap("Greys"),
edge_vmin=edge_vmin,
edge_vmax=edge_vmax,
width=1.0,
node_size=50,
alpha=0.8,
)
fig.axes[0].xaxis.set_visible(False)
fig.canvas.draw()
if args is None:
save_path = os.path.join("log/", name + ".pdf")
else:
save_path = os.path.join(
"log", name + gen_explainer_prefix(args) + "_" + str(epoch) + ".pdf"
)
print("log/" + name + gen_explainer_prefix(args) + "_" + str(epoch) + ".pdf")
os.makedirs(os.path.dirname(save_path), exist_ok=True)
plt.savefig(save_path, format="pdf")
img = tensorboardX.utils.figure_to_image(fig)
writer.add_image(name, img, epoch)
def plot_cmap(cmap, ncolor):
"""
A convenient function to plot colors of a matplotlib cmap
Credit goes to http://gvallver.perso.univ-pau.fr/?p=712
Args:
ncolor (int): number of color to show
cmap: a cmap object or a matplotlib color name
"""
if isinstance(cmap, str):
name = cmap
try:
cm = plt.get_cmap(cmap)
except ValueError:
print("WARNINGS :", cmap, " is not a known colormap")
cm = plt.cm.gray
else:
cm = cmap
name = cm.name
with matplotlib.rc_context(matplotlib.rcParamsDefault):
fig = plt.figure(figsize=(12, 1), frameon=False)
ax = fig.add_subplot(111)
ax.pcolor(np.linspace(1, ncolor, ncolor).reshape(1, ncolor), cmap=cm)
ax.set_title(name)
xt = ax.set_xticks([])
yt = ax.set_yticks([])
return fig
def plot_cmap_tb(writer, cmap, ncolor, name):
"""Plot the color map used for plot."""
fig = plot_cmap(cmap, ncolor)
img = tensorboardX.utils.figure_to_image(fig)
writer.add_image(name, img, 0)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)
)
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
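# Minimal usage sketch (identity matrix purely for illustration):
#
#     from scipy import sparse
#     t = sparse_mx_to_torch_sparse_tensor(sparse.coo_matrix(np.eye(3)))
#     # t is a 3x3 torch.sparse.FloatTensor with three nonzero entries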
def numpy_to_torch(img, requires_grad=True):
if len(img.shape) < 3:
output = np.float32([img])
else:
output = np.transpose(img, (2, 0, 1))
output = torch.from_numpy(output)
if use_cuda:
output = output.cuda()
output.unsqueeze_(0)
v = Variable(output, requires_grad=requires_grad)
return v
def read_graphfile(datadir, dataname, max_nodes=None, edge_labels=False):
""" Read data from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets
graph index starts with 1 in file
Returns:
List of networkx objects with graph and node labels
"""
prefix = os.path.join(datadir, dataname, dataname)
filename_graph_indic = prefix + "_graph_indicator.txt"
# index of graphs that a given node belongs to
graph_indic = {}
with open(filename_graph_indic) as f:
i = 1
for line in f:
line = line.strip("\n")
graph_indic[i] = int(line)
i += 1
filename_nodes = prefix + "_node_labels.txt"
node_labels = []
min_label_val = None
try:
with open(filename_nodes) as f:
for line in f:
line = line.strip("\n")
l = int(line)
node_labels += [l]
if min_label_val is None or min_label_val > l:
min_label_val = l
# assume that node labels are consecutive
num_unique_node_labels = max(node_labels) - min_label_val + 1
node_labels = [l - min_label_val for l in node_labels]
except IOError:
print("No node labels")
filename_node_attrs = prefix + "_node_attributes.txt"
node_attrs = []
try:
with open(filename_node_attrs) as f:
for line in f:
                line = line.strip()
attrs = [
float(attr) for attr in re.split("[,\s]+", line) if not attr == ""
]
node_attrs.append(np.array(attrs))
except IOError:
print("No node attributes")
label_has_zero = False
filename_graphs = prefix + "_graph_labels.txt"
graph_labels = []
label_vals = []
with open(filename_graphs) as f:
for line in f:
line = line.strip("\n")
val = int(line)
if val not in label_vals:
label_vals.append(val)
graph_labels.append(val)
label_map_to_int = {val: i for i, val in enumerate(label_vals)}
graph_labels = np.array([label_map_to_int[l] for l in graph_labels])
if edge_labels:
# For Tox21_AHR we want to know edge labels
filename_edges = prefix + "_edge_labels.txt"
edge_labels = []
edge_label_vals = []
with open(filename_edges) as f:
for line in f:
line = line.strip("\n")
val = int(line)
if val not in edge_label_vals:
edge_label_vals.append(val)
edge_labels.append(val)
edge_label_map_to_int = {val: i for i, val in enumerate(edge_label_vals)}
filename_adj = prefix + "_A.txt"
adj_list = {i: [] for i in range(1, len(graph_labels) + 1)}
# edge_label_list={i:[] for i in range(1,len(graph_labels)+1)}
index_graph = {i: [] for i in range(1, len(graph_labels) + 1)}
num_edges = 0
with open(filename_adj) as f:
for line in f:
line = line.strip("\n").split(",")
e0, e1 = (int(line[0].strip(" ")), int(line[1].strip(" ")))
adj_list[graph_indic[e0]].append((e0, e1))
index_graph[graph_indic[e0]] += [e0, e1]
# edge_label_list[graph_indic[e0]].append(edge_labels[num_edges])
num_edges += 1
for k in index_graph.keys():
index_graph[k] = [u - 1 for u in set(index_graph[k])]
graphs = []
for i in range(1, 1 + len(adj_list)):
# indexed from 1 here
G = nx.from_edgelist(adj_list[i])
if max_nodes is not None and G.number_of_nodes() > max_nodes:
continue
# add features and labels
G.graph["label"] = graph_labels[i - 1]
# Special label for aromaticity experiment
# aromatic_edge = 2
# G.graph['aromatic'] = aromatic_edge in edge_label_list[i]
for u in G.nodes():
if len(node_labels) > 0:
node_label_one_hot = [0] * num_unique_node_labels
node_label = node_labels[u - 1]
node_label_one_hot[node_label] = 1
G.nodes[u]["label"] = node_label_one_hot
if len(node_attrs) > 0:
G.nodes[u]["feat"] = node_attrs[u - 1]
if len(node_attrs) > 0:
G.graph["feat_dim"] = node_attrs[0].shape[0]
# relabeling
mapping = {}
it = 0
    if int(nx.__version__.split(".")[0]) < 2:  # compare major version only
for n in G.nodes():
mapping[n] = it
it += 1
else:
for n in G.nodes:
mapping[n] = it
it += 1
# indexed from 0
graphs.append(nx.relabel_nodes(G, mapping))
return graphs
def read_biosnap(datadir, edgelist_file, label_file, feat_file=None, concat=True):
""" Read data from BioSnap
Returns:
List of networkx objects with graph and node labels
"""
G = nx.Graph()
delimiter = "\t" if "tsv" in edgelist_file else ","
print(delimiter)
df = pd.read_csv(
os.path.join(datadir, edgelist_file), delimiter=delimiter, header=None
)
data = list(map(tuple, df.values.tolist()))
G.add_edges_from(data)
print("Total nodes: ", G.number_of_nodes())
G = max(nx.connected_component_subgraphs(G), key=len)
print("Total nodes in largest connected component: ", G.number_of_nodes())
df = pd.read_csv(os.path.join(datadir, label_file), delimiter="\t", usecols=[0, 1])
data = list(map(tuple, df.values.tolist()))
missing_node = 0
for line in data:
if int(line[0]) not in G:
missing_node += 1
else:
G.nodes[int(line[0])]["label"] = int(line[1] == "Essential")
print("missing node: ", missing_node)
missing_label = 0
remove_nodes = []
for u in G.nodes():
if "label" not in G.nodes[u]:
missing_label += 1
remove_nodes.append(u)
G.remove_nodes_from(remove_nodes)
print("missing_label: ", missing_label)
if feat_file is None:
feature_generator = featgen.ConstFeatureGen(np.ones(10, dtype=float))
feature_generator.gen_node_features(G)
else:
df = pd.read_csv(os.path.join(datadir, feat_file), delimiter=",")
data = np.array(df.values)
print("Feat shape: ", data.shape)
for row in data:
if int(row[0]) in G:
if concat:
node = int(row[0])
onehot = np.zeros(10)
onehot[min(G.degree[node], 10) - 1] = 1.0
G.nodes[node]["feat"] = np.hstack(
(np.log(row[1:] + 0.1), [1.0], onehot)
)
else:
G.nodes[int(row[0])]["feat"] = np.log(row[1:] + 0.1)
missing_feat = 0
remove_nodes = []
for u in G.nodes():
if "feat" not in G.nodes[u]:
missing_feat += 1
remove_nodes.append(u)
G.remove_nodes_from(remove_nodes)
print("missing feat: ", missing_feat)
return G
def build_aromaticity_dataset():
filename = "data/tox21_10k_data_all.sdf"
basename = filename.split(".")[0]
collector = []
sdprovider = Chem.SDMolSupplier(filename)
for i,mol in enumerate(sdprovider):
try:
moldict = {}
moldict['smiles'] = Chem.MolToSmiles(mol)
#Parse Data
for propname in mol.GetPropNames():
moldict[propname] = mol.GetProp(propname)
nb_bonds = len(mol.GetBonds())
is_aromatic = False; aromatic_bonds = []
for j in range(nb_bonds):
if mol.GetBondWithIdx(j).GetIsAromatic():
aromatic_bonds.append(j)
is_aromatic = True
moldict['aromaticity'] = is_aromatic
moldict['aromatic_bonds'] = aromatic_bonds
collector.append(moldict)
except:
print("Molecule %s failed"%i)
    data = pd.DataFrame(collector)
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640),features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446),features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303),features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################3
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
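# Worked example for the two gap helpers above (dates are illustrative): for the
# joined string '20160702-20160621:20160710', the coupon received on 2016-07-02 was
# also received 11 days earlier (2016-06-21) and 8 days later (2016-07-10), so
# get_day_gap_before returns 11 and get_day_gap_after returns 8; a result of -1
# means no earlier/later receipt of the same coupon exists.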
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
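# Worked example for the four discount helpers above (coupon string is illustrative):
# a full-reduction coupon '150:20' (spend 150, get 20 off) yields
#     calc_discount_rate('150:20') -> 1 - 20/150 = 0.8667 (approx.)
#     get_discount_man('150:20')   -> 150
#     get_discount_jian('150:20')  -> 20
#     is_man_jian('150:20')        -> 1
# while a plain rate such as '0.9' passes through as 0.9 with man/jian 'null' and flag 0.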
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
dataset2.to_csv('data/coupon2_feature.csv',index=None)
#dataset1
dataset1['day_of_week'] = dataset1.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset1['day_of_month'] = dataset1.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset1['days_distance'] = dataset1.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,4,13)).days)
dataset1['discount_man'] = dataset1.discount_rate.apply(get_discount_man)
dataset1['discount_jian'] = dataset1.discount_rate.apply(get_discount_jian)
dataset1['is_man_jian'] = dataset1.discount_rate.apply(is_man_jian)
dataset1['discount_rate'] = dataset1.discount_rate.apply(calc_discount_rate)
d = dataset1[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset1 = pd.merge(dataset1,d,on='coupon_id',how='left')
dataset1.to_csv('data/coupon1_feature.csv',index=None)
############# merchant related feature #############
"""
1.merchant related:
total_sales. sales_use_coupon. total_coupon
coupon_rate = sales_use_coupon/total_sales.
transfer_rate = sales_use_coupon/total_coupon.
      merchant_avg_distance, merchant_min_distance, merchant_max_distance of purchases that used a coupon
"""
#for dataset3
merchant3 = feature3[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant3[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant3[merchant3.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant3[merchant3.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant3_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t2,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t3,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t5,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t6,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t7,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t8,on='merchant_id',how='left')
merchant3_feature.sales_use_coupon = merchant3_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature['merchant_coupon_transfer_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_coupon
merchant3_feature['coupon_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_sales
merchant3_feature.total_coupon = merchant3_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature.to_csv('data/merchant3_feature.csv',index=None)
#for dataset2
merchant2 = feature2[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant2[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant2[merchant2.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant2[merchant2.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant2_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t2,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t3,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t5,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t6,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t7,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t8,on='merchant_id',how='left')
merchant2_feature.sales_use_coupon = merchant2_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature['merchant_coupon_transfer_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_coupon
merchant2_feature['coupon_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_sales
merchant2_feature.total_coupon = merchant2_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature.to_csv('data/merchant2_feature.csv',index=None)
#for dataset1
merchant1 = feature1[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant1[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant1[merchant1.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant1[merchant1.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant1_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t2,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t3,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t5,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t6,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t7,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t8,on='merchant_id',how='left')
merchant1_feature.sales_use_coupon = merchant1_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature['merchant_coupon_transfer_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_coupon
merchant1_feature['coupon_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_sales
merchant1_feature.total_coupon = merchant1_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature.to_csv('data/merchant1_feature.csv',index=None)
############# user related feature #############
"""
3.user related:
count_merchant.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
buy_use_coupon/buy_total
user_date_datereceived_gap
"""
def get_user_date_datereceived_gap(s):
s = s.split(':')
return (date(int(s[0][0:4]),int(s[0][4:6]),int(s[0][6:8])) - date(int(s[1][0:4]),int(s[1][4:6]),int(s[1][6:8]))).days
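# Illustrative note: the gap string is built below as '<date>:<date_received>'
# (both 'YYYYMMDD'), so for made-up values
#   get_user_date_datereceived_gap('20160520:20160510') -> 10
# i.e. the number of days between receiving a coupon and the recorded consumption date.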
#for dataset3
user3 = feature3[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user3[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user3[user3.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user3[user3.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user3[user3.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user3[(user3.date_received!='null')&(user3.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user3_feature = pd.merge(t,t1,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t3,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t4,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t5,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t6,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t7,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t8,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t9,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t11,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t12,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t13,on='user_id',how='left')
user3_feature.count_merchant = user3_feature.count_merchant.replace(np.nan,0)
user3_feature.buy_use_coupon = user3_feature.buy_use_coupon.replace(np.nan,0)
user3_feature['buy_use_coupon_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.buy_total.astype('float')
user3_feature['user_coupon_transfer_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.coupon_received.astype('float')
user3_feature.buy_total = user3_feature.buy_total.replace(np.nan,0)
user3_feature.coupon_received = user3_feature.coupon_received.replace(np.nan,0)
user3_feature.to_csv('data/user3_feature.csv',index=None)
#for dataset2
user2 = feature2[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user2[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user2[user2.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user2[user2.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user2[user2.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user2[(user2.date_received!='null')&(user2.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user2_feature = | pd.merge(t,t1,on='user_id',how='left') | pandas.merge |
from abc import ABC
import numpy as np
import pandas as pd
from fastnumbers import isintlike, isreal, fast_forceint, fast_float
from optimus.engines.base.functions import BaseFunctions
from optimus.helpers.logger import logger
from optimus.infer import is_int_like, is_list_or_tuple
class PandasBaseFunctions(BaseFunctions, ABC):
@staticmethod
def is_string(series):
def _is_string(value):
if isinstance(value, str):
return True
else:
return False
return np.vectorize(_is_string)(series.values).flatten()
def is_integer(self, series):
if str(series.dtype) in self.constants.DATETIME_INTERNAL_TYPES:
return False
if str(series.dtype) in self.constants.INT_INTERNAL_TYPES:
return True
return np.vectorize(isintlike)(series).flatten()
def is_float(self, series):
if str(series.dtype) in self.constants.DATETIME_INTERNAL_TYPES:
return False
# use isreal to allow strings like "0"
return np.vectorize(isreal)(series).flatten()
def is_numeric(self, series):
if str(series.dtype) in self.constants.DATETIME_INTERNAL_TYPES:
return False
return np.vectorize(isreal)(series).flatten()
@classmethod
def _to_integer(cls, series, default=0):
# TODO replace_inf
if is_int_like(default):
int_type = True
default = int(default)
otypes = [int]
else:
int_type = False
otypes = [object]
try:
if default is not None:
series = series.fillna(default)
series = pd.Series(np.vectorize(fast_forceint,
otypes=otypes)(series, default=default,
on_fail=lambda x: default).flatten())
except Exception:
series = series.replace([np.inf, -np.inf], default)
if int_type:
series = pd.Series(np.floor(pd.to_numeric(series, errors='coerce', downcast='integer'))).fillna(default)
try:
series = series.astype('int64')
except:
pass
else:
series = pd.Series(np.floor( | pd.to_numeric(series, errors='coerce') | pandas.to_numeric |
"""
Simple Streamlit webserver application for serving developed embedding
a dashboard visualisation in streamlit.
"""
# Streamlit dependencies
import streamlit as st
st.beta_set_page_config(layout="wide", page_icon="pear")
#import joblib,os
# Data dependencies
import numpy as np
import random
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
import pandas as pd
from pulp import *
import math
import base64
from PIL import Image
import plotly.graph_objects as go
#import asyncio
#from httpx_oauth.clients.google import GoogleOAuth2
import datetime
# import sql DATABASE
from database.sql_tables import database
# Custom libraries
from visuals.forecasts import plot_forecast
# The main function where we will build the actual app
def main():
"""Dashboard with Streamlit """
# Creates a main title and subheader on your page -
# these are static across all pages
#st.title("Fresh Produce Inventory Planner")
# Creating sidebar with selection box -
# you can create multiple pages this way
options = ["Trends", "Inventory Planning"]
selection = st.sidebar.selectbox("Choose Option", options)
# Building out the "Information" page
if selection == "Trends":
#st.info("General Information")
# You can read a markdown file from supporting resources folder
#st.subheader("Dashboard")
#st.subheader("==========================================================")
#st.subheader("Dashboard Trends")
#st.subheader("==========================================================")
@st.cache(allow_output_mutation=True)
def get_base64_of_bin_file(bin_file):
with open(bin_file, 'rb') as f:
data = f.read()
return base64.b64encode(data).decode()
def set_png_as_page_bg(png_file):
bin_str = get_base64_of_bin_file(png_file)
page_bg_img = '''
<style>
body {
background-image: url("data:image/png;base64,%s");
background-size: cover;
}
</style>
''' % bin_str
st.markdown(page_bg_img, unsafe_allow_html=True)
return
#set_png_as_page_bg('Inventory Planning.png')
st.markdown("""
<iframe width="1190" height="800" src="https://app.powerbi.com/view?r=<KEY>" frameborder="0" allowFullScreen="true"></iframe>
""", unsafe_allow_html=True)
#st.markdown("""
#<iframe width="800" height="486" src="https://app.powerbi.com/view?r=<KEY>" frameborder="0" allowFullScreen="true"></iframe>
#""", unsafe_allow_html=True)
if selection == "Inventory Planning":
image = Image.open('Inventory Smaller header.png')
st.image(image, caption=None, use_column_width=True)
#st.subheader("==========================================================")
#st.subheader("Inventory Planning")
#st.subheader("==========================================================")
#"""### 2) GENERATE DATA"""
############################### STREAMLIT APP #########################################################
commodity = sorted(database['Commodities'].unique(), reverse=False)
selected_commodity = st.sidebar.selectbox(
label="Commodity",
options=commodity,
index=commodity.index("BEETROOT")
)
weight = database[database['Commodities'] == selected_commodity]['Weight_Kg'].unique()
selected_weight = st.sidebar.selectbox(
label="Weight (kg)",
options=weight
)
grade = database[
(database['Commodities'] == selected_commodity) & \
(database['Weight_Kg'] == selected_weight)
]['Size_Grade'].unique()
selected_grade = st.sidebar.selectbox(
label="Size Grade",
options=grade
)
# INSERT CALENDAR
today = datetime.date.today()
future_date = today + datetime.timedelta(days=1)
st.sidebar.markdown(f"""Today's date: **{today}**""")
forecast_date = st.sidebar.date_input('Forecast date', future_date)
df = database[
(database['Commodities'] == selected_commodity) & \
(database['Weight_Kg'] == selected_weight) & \
(database['Size_Grade'] == selected_grade)
][['Date', 'avg_per_kg']]
price = df.groupby('Date')['avg_per_kg'].mean()
price = pd.DataFrame(price)
price = price.asfreq('B', method='backfill')
result, pred = plot_forecast(price, 'avg_per_kg', selected_commodity, 60)
if forecast_date <= today:
st.sidebar.error("Error: Forecast date must fall after today's date.")
elif forecast_date > pred.index[-1]:
st.sidebar.error('Error: Forecast date not in forecast horizon')
elif forecast_date.weekday() == 5:
st.sidebar.error('Error: No forecasts generated for the weekend.')
elif forecast_date.weekday() == 6:
st.sidebar.error('Error: No forecasts generated for the weekend.')
else:
st.sidebar.success(f'''
Projected cost is R {pred[str(forecast_date)]:.2f} /Kg \n
Per container: R {pred[str(forecast_date)] * selected_weight:.2f}
''')
st.plotly_chart(result)
np.random.seed(123)
def generate_toy_data(n, lam, a, b):
'''Generate random number from poisson distribution.
Input:
n = number of data points to generate
lam = lambda of the poisson distribution
a, b = any positive coefficient (since we want to simulate demand)
Output:
x = independent variable
y = demand for toy data that results from a*x + b, with x ~ poisson(lam)
'''
x = np.random.poisson(lam, n)
x = x.reshape((-1,1))
y = a*x + b
y = y.reshape((-1,1))
y = y.astype(int)
return x, y
# generate toy data
demand=st.slider("What level of demand do you expect to have next week?",1.00,100.00,25.00, format="%f percent")/100
st.markdown ("Expecting low demand: 1%-25% of customers compared to previous week")
st.write ("Expecting medium demand: 25%-75% of customers compared to previous week")
st.write ("Expecting high demand: 75%-100% of customers compared to previous week")
##high_demand=0.75
x, y = generate_toy_data(1000, 100, demand,2)
# visualize toy data
plt.figure()
sns.distplot(y)
plt.show()
##st.pyplot()
#"""### 3) IMPLEMENT SIMPLE PREDICTION"""
# split data to training and testing set
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2)
df_train = pd.DataFrame({'train_x': train_x.flatten(), 'train_y': train_y.flatten()})
df_test = pd.DataFrame({'test_x': test_x.flatten(), 'test_y': test_y.flatten()})
#"""#### a) RANDOM FOREST"""
rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)
rf.fit(train_x, train_y)
test_y_rfpred = (rf.predict(test_x)).astype(int)
err_rf = abs(test_y_rfpred - test_y)
print("Mean Absolute Error:", round(np.mean(err_rf), 2), "degrees.")
#"""#### b) LINEAR REGRESSION"""
lr = LinearRegression()
lr.fit(train_x, train_y)
        test_y_lrpred = (lr.predict(test_x)).astype(int)
err_lr = abs(test_y_lrpred - test_y)
print('Mean Absolute Error:', round(np.mean(err_lr), 2), 'degrees.')
# check coefficient
print(lr.coef_)
print(lr.intercept_)
#"""#### c) SUMMARIZE RESULT & CHECK GRAPH (RANDOM FOREST & LINREG)"""
df_summ = pd.DataFrame({'test_x': test_x.flatten(),
'test_y': test_y.flatten(),
'test_y_rfpred': test_y_rfpred.flatten(),
'test_y_lrpred': test_y_lrpred.flatten()})
df_summ['diff_rf_actual'] = df_summ['test_y_rfpred'] - df_summ['test_y']
df_summ['diff_lr_actual'] = df_summ['test_y_lrpred'] - df_summ['test_y']
        # the toy data is simple, so the predictions are quite accurate and only a few are missed
        # even so, in cases like this:
        # --> we lose potential profit if prediction < demand
        # --> we incur unnecessary cost if prediction > demand, since the leftover goods cannot be sold at the selling price
#df_summ[(df_summ['diff_rf_actual'] != 0) | (df_summ['diff_lr_actual'] != 0)]
plt.figure()
components = [test_y, test_y_rfpred, test_y_lrpred]
labels=['test_y', 'test_y_rfpred', 'test_y_lrpred']
fig, axes = plt.subplots(1)
for component in components:
sns.distplot(component)
axes.legend(labels=labels)
plt.show()
#st.pyplot(fig)
#"""### 4) STOCHASTIC PROGRAMMING
#### a) DISCRETIZING DEMAND: TO CAPTURE PROBABILITY OF EACH POSSIBLE SCENARIO
#"""
# capturing probability of each possible scenario can be done in many ways,
# ranging from simple descriptive analytics to more complicated things like
# moment matching, monte carlo simulation, etc.
# we do the easiest here: do clustering to generate scenario (max 100 scenario for now)
def cluster_1d(df, max_cluster=100):
'''Cluster data into n different cluster where n is the minimum between unique scenario and max_cluster.
Input:
df = dataframe column containing scenario to cluster
max_cluster = number of maximum cluster we want to have (default=100)
Output:
cluster_centers_df = mapping between cluster labels and its centers
cluster_labels_df = mapping between df and its cluster labels
'''
km = KMeans(n_clusters=min(len(df.unique()),max_cluster))
km.fit(df.values.reshape(-1,1))
# get information about center
cluster_centers_df = pd.DataFrame(np.array(km.cluster_centers_.reshape(1,-1)[0].tolist()))
cluster_centers_df.columns = ['cluster_centers']
cluster_centers_df['labels'] = range(len(cluster_centers_df))
# get information about labels and add information about center
cluster_labels_df = pd.DataFrame(np.array(km.labels_))
cluster_labels_df.columns = ['labels']
cluster_labels_df = pd.concat([df.reset_index(drop=True), cluster_labels_df], axis=1)
cluster_labels_df = pd.merge(cluster_labels_df, cluster_centers_df, on='labels', how='left')
return cluster_centers_df, cluster_labels_df
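        # Minimal usage sketch for cluster_1d (illustrative, not executed here): passing a
        # pandas Series of simulated demand, e.g. pd.Series(test_y.flatten()), returns
        #   cluster_centers_df: mapping of each cluster label to its center (a demand scenario)
        #   cluster_labels_df:  each original observation with its assigned label and center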
def cluster_summ(df):
'''Summarize probability for each scenario by referring to result from cluster_1d.
Input:
df = dataframe column containing scenario to cluster
Output:
cluster_proportion_df = dataframe containing complete information about probability for each scenario
demand = possible scenario to happen
weight = probability of the possible scenario to happen
scenarios = indexing for demand
'''
cluster_centers_df, cluster_labels_df = cluster_1d(df)
count_label = cluster_labels_df[['labels']].count().values[0]
cluster_proportion_df = cluster_labels_df[['cluster_centers', 'labels']].groupby('cluster_centers').count().reset_index(drop=False)
cluster_proportion_df['count_labels'] = count_label
cluster_proportion_df['proportion_labels'] = cluster_proportion_df['labels'] / cluster_proportion_df['count_labels']
cluster_proportion_df['index'] = range(1,cluster_proportion_df.shape[0] + 1)
cluster_proportion_df['cluster_centers'] = np.round(cluster_proportion_df['cluster_centers'], decimals=(3))
demand = | pd.Series(cluster_proportion_df['cluster_centers'].values, index=cluster_proportion_df['index'].values) | pandas.Series |
#
# Example of solving the inventory control with lost sales problem
#
# Env: https://github.com/paulhendricks/gym-inventory/blob/master/gym_inventory/envs/inventory_env.py
#
# Author: <NAME>, NUS/ISS
#
import gym
import pyogmaneo
import gym_inventory # workaround for registration issue
import matplotlib.pyplot as plt
import pandas as pd
from EnvRunner import EnvRunner
timesteps=50
episodes=1000
max_inventory=50
d_lambda=25
env = gym.make('Inventory-v0', n=max_inventory, lam=d_lambda)
runner = EnvRunner(env, terminalReward=100)
history = []
for episode in range(episodes):
env.reset()
# timesteps
reward = 0
for t in range(timesteps):
# EnvRunner expects an array of observations
done, reward = runner.act(obsPreprocess=lambda obs : [obs])
if done:
break
history.append(reward)
print(f'Episode {episode+1} finished after {t+1} timesteps, reward {reward}')
# plot episodes v rewards
fig, ax = plt.subplots()
df = | pd.Series(history) | pandas.Series |
import numpy as np
import matplotlib.pyplot as plt
import itertools
import os
from multiprocessing import Pool
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
import scipy.integrate
from sklearn.metrics import mean_squared_error
from scipy.linalg import svd
from scipy.optimize import least_squares
import datetime
import bokeh.io
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import holoviews as hv
# bokeh.io.output_notebook()
hv.extension('bokeh')
import git
import sys
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
sys.path.insert(1, f"{homedir}" + '/models/data_processing')
import loader
death_time = 14
def add_active_cases(us, data_active_cases):
active_cases = loader.load_data(data_active_cases)
active_cases['FIPS']=active_cases['FIPS'].astype(int)
loader.convert_dates(active_cases, "Date")
difference = (pd.to_datetime(active_cases['Date'])[0] - pd.to_datetime(us['date'])[0])/np.timedelta64(1, 'D')
active_column = []
end = len(us)-1
for index, row in us.iterrows():
print(f"{index}/{end}")
county = row['fips']
date = row['date_processed']
if date < difference:
active_column.append(-1)
else:
entry = (active_cases[(active_cases.date_processed==date-difference) & (active_cases.FIPS == county)])["Active"].values
if len(entry) != 0:
active_column.append(entry[0])
else:
active_column.append(-1)
us["active_cases"] = active_column
return us
def process_data(data_covid, data_population, save=True):
covid = loader.load_data(data_covid)
loader.convert_dates(covid, "date")
population = loader.load_data(data_population)
covid.loc[covid["county"]=='New York City', "fips"]=36061
covid['Population'] = covid.apply(lambda row: loader.query(population, "FIPS", row.fips)['total_pop'], axis=1)
covid.dropna(subset=['fips'], inplace=True)
covid['fips']=covid['fips'].astype(int)
covid = add_active_cases(covid, "/data/us/covid/JHU_daily_US.csv")
if save:
covid.to_csv(f"{homedir}" + "/models/epidemiological/production/us_training_data.csv")
return covid
###########################################################
def get_variables(res, data, index):
extrapolate = -1
if index > len(data)-1:
extrapolate = index + (index-len(data)+1)
s = model(res.x, data, extrapolate=extrapolate)
P = s[:,0][index]
E = s[:,1][index]
C = s[:,2][index]
A = s[:,3][index]
I = s[:,4][index]
Q = s[:,5][index]
R = s[:,6][index]
return (P,E,C,A,I,Q,R)
def get_deaths(res, data, extrapolate=14):
s = model(res.x, data, len(data)+extrapolate)
P = s[:,0]
E = s[:,1]
C = s[:,2]
A = s[:,3]
I = s[:,4]
Q = s[:,5]
R = s[:,6]
D = s[:,7]
tp = np.arange(0, len(data)+extrapolate)
deaths = list(zip(tp,D))
return deaths
def get_death_cdf(death_pdf, extrapolate, switch=True):
death_cdf = []
for percentile in [10, 20, 30, 40, 50, 60, 70, 80, 90]: #make this a separate function
forecast = list(np.nanpercentile(death_pdf, percentile, axis=0))
death_cdf.append(forecast)
if switch == False:
extrapolate = int(extrapolate)
end = len(death_cdf[-1])
if extrapolate >= 14:
end = end - extrapolate + 14
max_total = death_cdf[-1][-1*(extrapolate-1):end]
max_total_previous = death_cdf[-1][-1*(extrapolate):end-1]
min_total = death_cdf[0][-1*(extrapolate-1):end]
min_total_previous = death_cdf[0][-1*(extrapolate):end-1]
max_daily_change = [i - j for i, j in zip(max_total, max_total_previous)]
min_daily_change = [i - j for i, j in zip(min_total, min_total_previous)]
expected_total = death_cdf[4][-1*(extrapolate-1):end]
expected__total_previous = death_cdf[4][-1*(extrapolate):end-1]
expected__daily_change = [i - j for i, j in zip(expected_total, expected__total_previous)]
expected = np.mean(expected__daily_change)
diff = np.mean(np.array(max_daily_change)-np.array(min_daily_change))
# ratio = np.mean(np.array(max_daily_change)/np.array(min_daily_change))
ratio = diff/expected
print(ratio)
if ratio > 0.5:
print("recalculate error bounds")
# See how general these parameter variances are
# [1.16498627e-05 2.06999186e-05 5.41782152e-04 6.49380289e-06
# 4.84675662e-05 3.57516920e-05 1.98097480e-05 8.96749155e-06
# 3.90853805e-06 3.22475887e-06 4.37489771e-06 3.47350497e-05
# 1.22894548e-06 3.21246366e-05 1.15024316e-04 3.08582517e-04
# 1.02787854e-04 2.77456475e-05 4.87059431e-05 8.25090225e-04
# 5.81252202e-04 1.02128167e-03 3.15389632e-05 0.00000000e+00
# 5.93277957e-05]
death_cdf = None
return death_cdf
def reject_outliers(data, m = 1.):
data = np.array(data)
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s<m]
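# reject_outliers keeps only the values whose absolute deviation from the median is less
# than m times the median absolute deviation (MAD); with the default m=1 this is a
# fairly strict filter on the fit/actual slope ratios used in estimate_bounds below.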
# def reject_outliers(data, m=2):
# data = np.array(data)
# return data[abs(data - np.mean(data)) < m * np.std(data)]
# returns standard deviation of fitted parameters
def get_param_errors(res, pop):
pfit = res.x
pcov = res.jac
pcov = np.dot(pcov.T, pcov)
pcov = np.linalg.pinv(pcov) #uses svd
pcov = np.diag(pcov)
rcov = np.cov(res.fun)/pop ##put res.fun/pop inside
# scaler = res.cost*len(res.fun)/pop
# rcov = rcov * scaler
perr = pcov * rcov
perr = np.sqrt(perr)
return perr
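# get_param_errors above estimates per-parameter standard deviations in the usual
# least-squares way: diag(pinv(J^T J)) computed from the fit Jacobian, scaled by the
# residual variance (here additionally divided by the population), then square-rooted.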
###########################################################
def pecaiqr(dat, t, params, N, max_t):
    # Design note: one could define a time td at which social distancing starts and,
    # for t > td, halve dI/dt, dA/dt, dQ/dt and dC/dt. With a single parameter fit that
    # is hard to justify (it averages two regimes), so a better approach is to stitch
    # together two separate fits, one on time < td and one on time > td, where the
    # initial conditions of the second fit are the last data points before td and its
    # initial parameters are the fitted parameters of the first fit (some parameters
    # could even be fixed to the original values; see the least_squares documentation).
    # That way there is no need to guess how much social distancing reduces the
    # derivatives, and the new parameters show how they change.
if t >= max_t:
return [0]*8
a_1 = params[0]
a_2 = params[1]
a_3 = params[2]
b_1 = params[3]
b_2 = params[4]
b_3 = params[5]
b_4 = params[6]
g_a = params[7]
g_i = params[8]
th = params[9]
del_a = params[10]
del_i = params[11]
r_a = params[12]
r_i = params[13]
r_q = params[14]
d_i = params[15]
d_q = params[16]
P = dat[0]
E = dat[1]
C = dat[2]
A = dat[3]
I = dat[4]
Q = dat[5]
R = dat[6]
dPdt = (- ((a_1+a_2)*C*P)/N) + (-a_3*P + b_4*E)*(N/(P+E))
dEdt = (- (b_1 * A + b_2 * I) * E / N) + b_3*C + (a_3*P - b_4*E)*(N/(P+E))
dCdt = -(g_a + g_i)*C + ((b_1 * A + b_2 * I) * E / N) - b_3*C
dAdt = (a_1 * C * P) / N + g_a*C - (r_a + del_a + th)*A
dIdt = (a_2 * C * P) / N + g_i*C - ((r_i+d_i)+del_i)*I+th*A
dQdt = del_a*A + del_i*I - (r_q+d_q)*Q
dRdt = r_a*A + (r_i+d_i)*I + (r_q+d_q)*Q
dDdt = d_i*I + d_q*Q
dzdt = [dPdt, dEdt, dCdt, dAdt, dIdt, dQdt, dRdt, dDdt]
return dzdt
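# Compartment layout used throughout this file (inferred from the plot legends further
# below): P = Protected, E = Exposed, C = Carrier, A = Asymptomatic, I = Infected,
# Q = Quarantined, R = Removed/Recovered, D = Deceased; N is the total population and
# dDdt accumulates deaths coming from the I and Q compartments.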
def model(params, data, extrapolate=-1, offset=0, death_metric="deaths"):
N = data['Population'].values[0] # total population
initial_conditions = N * np.array(params[-7:]) # the parameters are a fraction of the population so multiply by the population
P0 = initial_conditions[0]
E0 = initial_conditions[1]
C0 = initial_conditions[2]
A0 = initial_conditions[3]
I0 = initial_conditions[4]
Q0 = initial_conditions[5]
# Q0 = data['active_cases'].values[0] #fit to active cases instead
R0 = initial_conditions[6]
D0 = abs(data[death_metric].values[0])
yz_0 = np.array([P0, E0, C0, A0, I0, Q0, R0, D0])
n = len(data)
if extrapolate > 0:
n += extrapolate
args = (params, N, n)
try:
s = scipy.integrate.odeint(pecaiqr, yz_0, np.arange(offset, n), args=args) #printmsg = True
# s = scipy.integrate.solve_ivp(fun=lambda t, y: pecaiqr(y, t, params, N, n), t_span=[offset, n], y0=yz_0, t_eval=np.arange(offset, n), method="LSODA")
except RuntimeError:
print('RuntimeError', params)
return np.zeros((n, len(yz_0)))
return s
def model_ivp(params, data, extrapolate=-1, offset=0, death_metric="deaths"):
N = data['Population'].values[0] # total population
initial_conditions = N * np.array(params[-7:]) # the parameters are a fraction of the population so multiply by the population
P0 = initial_conditions[0]
E0 = initial_conditions[1]
C0 = initial_conditions[2]
A0 = initial_conditions[3]
I0 = initial_conditions[4]
Q0 = initial_conditions[5]
# Q0 = data['active_cases'].values[0] #fit to active cases instead
R0 = initial_conditions[6]
D0 = abs(data[death_metric].values[0])
yz_0 = np.array([P0, E0, C0, A0, I0, Q0, R0, D0])
n = len(data) + extrapolate
solved = scipy.integrate.solve_ivp(fun=lambda t, y: pecaiqr(y, t, params, N, n), t_span=[offset, n], y0=yz_0, t_eval=np.arange(offset, n), method="LSODA")
s = solved.y
status = solved.success
print(status)
# if status is False or diff < 0:
# s = None
return s
def model_beyond(fit, params, data, guess_bounds, extrapolate=-1, start=-1):
offset = len(data)+start
N = data['Population'].values[0] # total population
P0 = fit[:,0][offset]
E0 = fit[:,1][offset]
C0 = fit[:,2][offset]
A0 = fit[:,3][offset]
I0 = fit[:,4][offset]
Q0 = fit[:,5][offset]
# Q0 = data['active_cases'].values[0] #fit to active cases instead
R0 = fit[:,6][offset]
# D0 = data[death_metric].values[start]
D0 = fit[:,7][offset]
yz_0 = np.array([P0, E0, C0, A0, I0, Q0, R0, D0])
n = len(data)+extrapolate
args = (params, N, n)
try:
s = scipy.integrate.odeint(pecaiqr, yz_0, np.arange(offset, n), args=args)
# s = scipy.integrate.solve_ivp(fun=lambda t, y: pecaiqr(y, t, params, N, n), t_span=[offset, n], y0=yz_0)
except RuntimeError:
print('RuntimeError', params)
bound_mean, bound_deviation = guess_bounds
scaler = np.random.normal(loc=bound_mean, scale=bound_deviation)
bound = (fit[:,7][offset:])*(1+scaler)
return bound
# return np.zeros((n, len(yz_0)))
return s[:,7]
def estimate_bounds(res, data, fit):
end = len(data)
actual_current = (data["avg_deaths"].values)[3:end]
actual_previous = (data["avg_deaths"].values)[2:-1]
actual_slope = [i - j for i, j in zip(actual_current, actual_previous)]
fit_current = fit[:,7][3:end]
fit_previous = fit[:,7][2:-1]
fit_slope = [i - j for i, j in zip(fit_current, fit_previous)]
slope_ratio = np.array(actual_slope)/np.array(fit_slope)
slope_ratio = reject_outliers(slope_ratio)
mean = None
deviation = None
if len(slope_ratio) > 0:
mean = np.mean(slope_ratio)
deviation = np.std(slope_ratio)
if deviation > 0.2:
deviation = 0.2
if mean < 1-deviation/2:
mean = 1-deviation/2
elif mean > 1 + deviation/2:
mean = 1 + deviation/2
return (mean,deviation)
def quickie(fit, data, guess_bounds, start=-1):
offset = len(data)+start
bound_mean, bound_deviation = guess_bounds
bound = []
predictions = fit[:,7][(offset-1):]
scaler = np.random.normal(loc=bound_mean, scale=bound_deviation)
previous = predictions[0]
for index, point in enumerate(predictions):
if index > 0:
current = predictions[index]
change = current-predictions[index-1]
change = change*(scaler)
bound_point = previous + change
bound.append(bound_point)
previous = bound_point
# previous = current
bound = np.array(bound)
return bound
# returns uncertainty of the fit for all variables
def get_fit_errors(res, p0_params, data, extrapolate=14, start=-1, quick=False, death_metric="deaths"):
population = list(data["Population"])[-1]
errors = get_param_errors(res, population)
errors[len(p0_params):] = 0
fit = model(res.x, data, extrapolate)
guess_bounds = estimate_bounds(res,data,fit)
if guess_bounds == (None, None):
return np.zeros((1,int(len(data)+extrapolate)))
uncertainty = []
samples = 100
if extrapolate > 0 :
if quick:
for i in range(samples):
death_series = quickie(fit, data, guess_bounds, start=start)
latest_D = (data[death_metric].values)[-1]
# death_series = np.concatenate((data[death_metric].values[0:len(data)], death_series[-1*start:]))
death_series = np.concatenate((fit[0:len(data)+start, 7], death_series))
for index, death in enumerate(death_series):
if index >= len(data) and death <= latest_D:
death_series[index] = None
uncertainty.append(death_series)
else:
for i in range(samples):
sample = np.random.normal(loc=res.x, scale=errors)
death_series = model_beyond(fit, sample, data, guess_bounds, extrapolate, start=start)
latest_D = (data[death_metric].values)[-1]
# death_series = np.concatenate((fit[:,7][0:len(data)-1], death_series))
# death_series = np.concatenate((data[death_metric].values[0:len(data)], death_series[-1*start:]))
death_series = np.concatenate((fit[0:len(data)+start, 7], death_series))
for index, death in enumerate(death_series):
if index >= len(data) and death <= latest_D:
death_series[index] = None
uncertainty.append(death_series)
else:
for i in range(samples):
sample = np.random.normal(loc=res.x, scale=errors)
death_series = model(sample, data, extrapolate)
death_series = death_series[:,7]
uncertainty.append(death_series)
uncertainty = np.array(uncertainty)
return uncertainty
def mse_qd(A, B):
Ap = np.nan_to_num(A)
Bp = np.nan_to_num(B)
Ap[A == -np.inf] = 0
Bp[B == -np.inf] = 0
Ap[A == np.inf] = 0
Bp[B == np.inf] = 0
return mean_squared_error(Ap, Bp)
def plot_model(res, data, extrapolate=14, boundary=None, plot_infectious=False, death_metric="deaths"):
s = model(res.x, data, extrapolate=extrapolate)
P = s[:,0]
E = s[:,1]
C = s[:,2]
A = s[:,3]
I = s[:,4]
Q = s[:,5]
R = s[:,6]
D = s[:,7]
t = np.arange(0, len(data))
tp = np.arange(0, len(data)+extrapolate)
p = bokeh.plotting.figure(plot_width=1000,
plot_height=600,
title = ' PECAIQR Model',
x_axis_label = 't (days)',
y_axis_label = '# people')
if plot_infectious:
p.line(tp, I, color = 'red', line_width = 1, legend = 'Currently Infected')
p.line(tp, D, color = 'black', line_width = 1, legend = 'Deceased')
p.line(tp, Q, color = 'green', line_width = 1, legend = 'Quarantined')
p.line(tp, R, color = 'gray', line_width = 1, legend = 'Removed')
p.line(tp, P, color = 'blue', line_width = 1, legend = 'Protected')
p.line(tp, E, color = 'yellow', line_width = 1, legend = 'Exposed')
p.line(tp, C, color = 'orange', line_width = 1, legend = 'Carrier')
    p.line(tp, A, color = 'brown', line_width = 1, legend = 'Asymptomatic')
# death
p.circle(t, data[death_metric], color ='black', legend='Real Death')
# quarantined
p.circle(t, data['active_cases'], color ='purple', legend='Tested Infected')
if boundary is not None:
vline = bokeh.models.Span(location=boundary, dimension='height', line_color='black', line_width=3)
p.renderers.extend([vline])
p.legend.location = 'top_left'
bokeh.io.show(p)
def plot_with_errors_sample(res, p0_params, data, extrapolate=14, boundary=None, plot_infectious=False, start=-1, quick=False, death_metric="deaths"):
s = model(res.x, data, len(data)+extrapolate)
P = s[:,0]
E = s[:,1]
C = s[:,2]
A = s[:,3]
I = s[:,4]
Q = s[:,5]
R = s[:,6]
D = s[:,7]
uncertainty = get_fit_errors(res, p0_params, data, extrapolate=extrapolate, start=start, quick=quick, death_metric=death_metric)
s1 = np.nanpercentile(uncertainty, 10, axis=0)
s2 = np.nanpercentile(uncertainty, 90, axis=0)
t = np.arange(0, len(data))
tp = np.arange(0, len(data)+extrapolate)
p = bokeh.plotting.figure(plot_width=1000,
plot_height=600,
title = ' PECAIQR Model Errors',
x_axis_label = 't (days)',
y_axis_label = '# people')
p.varea(x=tp, y1=s1, y2=s2, color='black', fill_alpha=0.2)
p.line(tp, D, color = 'black', line_width = 1, legend = 'Deceased')
p.circle(t, data[death_metric], color ='black')
# quarantined
# p.circle(t, data['cases'], color ='purple')
if boundary is not None:
vline = bokeh.models.Span(location=boundary, dimension='height', line_color='black', line_width=3)
p.renderers.extend([vline])
p.legend.location = 'top_left'
bokeh.io.show(p)
return uncertainty
def test_convergence(data_length, pop, predictions):
converge = True
deaths = [death[1] for death in predictions]
diff = deaths[-1] - deaths[data_length-1]
if diff < 0 or deaths[-1] > 0.3*pop:
converge = False
return converge
def fill_nonconvergent(nonconvergent, data, end):
counties_dates = []
counties_death_errors = []
counties_fips = nonconvergent
for index, county in enumerate(nonconvergent):
county_data = loader.query(data, "fips", county)
deaths = county_data["deaths"].values
dates = | pd.to_datetime(county_data["date"].values) | pandas.to_datetime |
import os
from uuid import uuid4
import pytest
from thrift.transport import TSocket, TTransport
from thrift.transport.TSocket import TTransportException
from heavyai import connect
import datetime
import random
import string
import numpy as np
import pandas as pd
heavydb_host = os.environ.get('HEAVYDB_HOST', 'localhost')
def _check_open():
"""
Test to see if OmniSci running on localhost and socket open
"""
socket = TSocket.TSocket(heavydb_host, 6274)
transport = TTransport.TBufferedTransport(socket)
try:
transport.open()
return True
except TTransportException:
return False
@pytest.fixture(scope='session')
def mapd_server():
"""Ensure a mapd server is running, optionally starting one if none"""
if _check_open():
# already running before pytest started
pass
else:
raise RuntimeError(
"Unable to connect to OmniSci server at {}".format(heavydb_host)
)
@pytest.fixture(scope='session')
def con(mapd_server):
"""
Fixture to provide Connection for tests run against live OmniSci instance
"""
return connect(
user="admin",
password='<PASSWORD>',
host=heavydb_host,
port=6274,
protocol='binary',
dbname='omnisci',
)
@pytest.fixture
def mock_client(mocker):
"""A magicmock for heavydb.connection.Client"""
return mocker.patch("heavydb.connection.Client")
def no_gpu():
"""Check for the required GPU dependencies"""
try:
from numba import cuda
import cudf # noqa
try:
cuda.select_device(0)
except cuda.cudadrv.error.CudaDriverError:
return True
except ImportError:
return True
return False
def gen_string():
"""Generate a random string sequence for use in _tests_table_no_nulls"""
return ''.join(
[
random.choice(string.ascii_letters + string.digits)
for n in range(10)
]
)
def _tests_table_no_nulls(n_samples):
"""
Generates a dataframe with all OmniSci types in it for use in integration
testing
"""
np.random.seed(12345)
tinyint_ = np.random.randint(
low=-127, high=127, size=n_samples, dtype='int8'
)
smallint_ = np.random.randint(
low=-32767, high=32767, size=n_samples, dtype='int16'
)
int_ = np.random.randint(
low=-2147483647, high=2147483647, size=n_samples, dtype='int32'
)
bigint_ = np.random.randint(
low=-9223372036854775807,
high=9223372036854775807,
size=n_samples,
dtype='int64',
)
# float and double ranges slightly lower than we support, full width
# causes an error in np.linspace that's not worth tracking down
float_ = np.linspace(-3.4e37, 3.4e37, n_samples, dtype='float32')
double_ = np.linspace(-1.79e307, 1.79e307, n_samples, dtype='float64')
bool_ = np.random.randint(low=0, high=2, size=n_samples, dtype='bool')
# effective date range of 1904 to 2035
# TODO: validate if this is an Arrow limitation, outside this range fails
date_ = [
datetime.date(1970, 1, 1) + datetime.timedelta(days=int(x))
for x in np.random.randint(-24000, 24000, size=n_samples)
]
datetime_ = [
datetime.datetime(1970, 1, 1)
+ datetime.timedelta(days=int(x), minutes=int(x))
for x in np.random.randint(-24000, 24000, size=n_samples)
]
time_h = np.random.randint(0, 24, size=n_samples)
time_m = np.random.randint(0, 60, size=n_samples)
time_s = np.random.randint(0, 60, size=n_samples)
time_ = [datetime.time(h, m, s) for h, m, s in zip(time_h, time_m, time_s)]
# generate random text strings
text_ = [gen_string() for x in range(n_samples)]
# read geo data from files
point_ = pd.read_csv("tests/data/points_10000.zip", header=None).values
point_ = np.squeeze(point_)
line_ = | pd.read_csv("tests/data/lines_10000.zip", header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np, pandas as pd, torch, cv2, os, argparse, math
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib import cm
from pydub import AudioSegment, effects, scipy_effects
from nnAudio import Spectrogram
from yolov5.models.experimental import attempt_load
from yolov5.utils.datasets import letterbox
from yolov5.utils.general import non_max_suppression, scale_coords, xyxy2xywh
from PIL import ImageFont, ImageDraw, Image
def speed_change(sound, speed=1.0):
# Manually override the frame_rate. This tells the computer how many
# samples to play per second
sound_with_altered_frame_rate = sound._spawn(sound.raw_data, overrides={
"frame_rate": int(sound.frame_rate * speed)
})
# convert the sound with altered frame rate to a standard frame rate
# so that regular playback programs will work right. They often only
# know how to play audio at standard frame rate (like 44.1k)
print(sound_with_altered_frame_rate.frame_rate)
return sound_with_altered_frame_rate.set_frame_rate(int(sound.frame_rate*speed))
def AudioStandarize(audio_file, sr, device=None, high_pass=0, ultrasonic=False):
if not device:
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
filext = audio_file[-3:].lower()
if filext == "mp3":
sound = AudioSegment.from_mp3(audio_file)
elif filext == "wma":
sound = AudioSegment.from_file(audio_file, "wma")
elif filext == "m4a":
sound = AudioSegment.from_file(audio_file, "m4a")
elif filext == "ogg":
sound = AudioSegment.from_ogg(audio_file)
elif filext == "wav":
sound = AudioSegment.from_wav(audio_file)
elif filext in ["mp4", "wma", "aac"]:
sound = AudioSegment.from_file(audio_file, filext)
else:
        print('Sorry, this file type is not supported. Supported extensions are: wav, mp3, wma, m4a, ogg, mp4, aac.')
return None
original_metadata = {'channel': sound.channels, 'sample_rate':sound.frame_rate, 'sample_size':len(sound.get_array_of_samples()), 'duration':sound.duration_seconds}
    print('Original audio: channel = %s, sample_rate = %s Hz, sample_size = %s, duration = %s s' %(original_metadata['channel'], original_metadata['sample_rate'], original_metadata['sample_size'], original_metadata['duration']))
if ultrasonic:
if sound.frame_rate > 100000: # UltraSonic
sound = speed_change(sound, 1/12)
else:
return False
if sound.frame_rate > sr:
sound = scipy_effects.low_pass_filter(sound, sr/2)
if sound.frame_rate != sr:
sound = sound.set_frame_rate(sr)
if sound.channels > 1:
sound = sound.split_to_mono()[0]
if not sound.sample_width == 2:
sound = sound.set_sample_width(2)
if high_pass:
sound = sound.high_pass_filter(high_pass)
sound = effects.normalize(sound) # normalize max-amplitude to 0 dB
songdata = np.array(sound.get_array_of_samples())
duration = round(np.array(sound.get_array_of_samples()).shape[0]/sound.frame_rate*1000) #ms
audiodata = torch.tensor(songdata, device=device).float()
print('Standarized audio: channel = %s, sample_rate = %s Hz, sample_size = %s, duration = %s s' %(sound.channels, sound.frame_rate, len(sound.get_array_of_samples()), sound.duration_seconds))
return sound.frame_rate, audiodata, duration, sound, original_metadata
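# Minimal usage sketch (the file name is hypothetical):
#   sr, audiodata, duration_ms, sound, meta = AudioStandarize('recording.wav', 32000)
# gives the standardised frame rate, a mono float tensor of samples, the duration in
# milliseconds, the normalised pydub AudioSegment and a dict of the original metadata.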
class Silic:
"""
Arguments:
sr (int): path of the model
n_fft (int): path of the config file
hop_length (str): select device that model will be loaded (cpu, cuda)
n_mels (int): load pretrained weights into the model
fmin (int): make model ready for inference
fmax (int): if False, yolov5 logs will be silent
clip_length (int):
"""
def __init__(self, sr=32000, n_fft=1600, hop_length=400, n_mels=240, fmin=100, fmax=15000, device=None, clip_length=3000):
self.sr = sr
self.n_fft = n_fft
self.hop_length = hop_length
self.n_mels = n_mels
self.fmin = fmin
self.fmax = fmax
self.clip_length = clip_length
if device:
self.device = device
else:
self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
self.spec_layer = Spectrogram.STFT(sr=sr, n_fft=n_fft, hop_length=hop_length).to(self.device)
self.spec_mel_layer = Spectrogram.MelSpectrogram(sr=sr, n_fft=n_fft, n_mels=n_mels, hop_length=hop_length, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=fmin, fmax=fmax, norm=1, verbose=True).to(self.device)
self.rainbow_img = torch.tensor([], dtype=torch.float32, device=self.device)
self.model_path = None
self.model = None
self.names = None
def audio(self, audio_file, ultrasonic=False):
self.audiofilename = os.path.basename(audio_file)
self.audiofilename_without_ext = os.path.splitext(self.audiofilename)[0]
self.audiopath = os.path.dirname(audio_file)
self.audiofileext = audio_file.split('.')[-1]
self.sr, self.audiodata, self.duration, self.sound, self.original_metadata = AudioStandarize(audio_file, self.sr, self.device, high_pass=self.fmin, ultrasonic=ultrasonic)
def save_standarized(self, targetmp3path=None):
if not targetmp3path:
targetmp3path = os.path.join(self.audiopath, 'mp3', '%s.mp3'%self.audiofilename_without_ext)
if not os.path.isdir(os.path.dirname(targetmp3path)):
os.mkdir(os.path.dirname(targetmp3path))
self.sound.export(targetmp3path, bitrate="128k", format="mp3")
print('Standarized audio was saved to %s' %targetmp3path)
return targetmp3path
def spectrogram(self, audiodata, spect_type='linear', rainbow_bands=5):
plt.rcParams['font.size'] = '16'
plt.rcParams['axes.grid'] = False
plt.rcParams['xtick.labelsize'] = False
plt.rcParams['ytick.labelsize'] = False
plt.rcParams['xtick.top'] = False
plt.rcParams['xtick.bottom'] = False
plt.rcParams['ytick.left'] = False
plt.rcParams['ytick.right'] = False
plt.rcParams.update({'font.size': 16})
if spect_type in ['mel', 'rainbow']:
spec = self.spec_mel_layer(audiodata)
w = spec.size()[2]/55
h = spec.size()[1]/55
if spect_type == 'mel':
fig = plt.figure(figsize=(w, h), dpi=100)
data = torch.sqrt(torch.sqrt(torch.abs(spec[0]) + 1e-6)).cpu().numpy()
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.imshow(data, origin='lower', cmap='gray_r', aspect='auto')
elif rainbow_bands > 1:
fig, ax = plt.subplots(rainbow_bands, gridspec_kw = {'wspace':0, 'hspace':0}, figsize=(w, h))
fig.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
data = torch.log(torch.log(spec[0] + 1e-6))
for i in range(rainbow_bands):
subdata = data[i*int(self.n_mels/rainbow_bands):(i+1)*int(self.n_mels/rainbow_bands)].cpu().numpy()
ax[rainbow_bands-i-1].set_axis_off()
ax[rainbow_bands-i-1].pcolormesh(subdata, cmap=ListedColormap(cm.rainbow(np.linspace((i+1)/rainbow_bands, (i/rainbow_bands), 32))), rasterized=True)
else:
print('Bins of Rainbow should larger than 0.')
return False
else:
spec = self.spec_layer(audiodata)
data = torch.sqrt(torch.sqrt(torch.abs(spec[0]) + 1e-6)).cpu().numpy()[:,:,0]
w = data.shape[1]/100*(5/4)*2
h = data.shape[0]/100*(1/4)*2
fig = plt.figure(figsize=(w, h), dpi=100)
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.imshow(data, origin='lower', cmap='gray_r', aspect='auto')
"""
plt.savefig(targetfilepath)
if show:
plt.show()
if spect_type == 'rainbow' and rainbow_bands == 5:
self.rainbow_img = self.cv2_img
"""
fig.canvas.draw()
img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
cv2_img = img #cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
plt.close(fig)
return cv2_img
def tfr(self, targetfilepath=None, spect_type='linear', rainbow_bands=5, start=0, stop=None):
if self.clip_length and ((self.audiodata.size()[0] / self.sr * 1000) < self.clip_length):
self.audiodata = torch.cat((self.audiodata, torch.zeros(round(self.clip_length*self.sr/1000)-self.audiodata.size()[0], device=self.device)), 0)
if not stop:
stop = self.duration
max_sample_size = 1920000
tmpimgs = []
if not targetfilepath:
targetfilepath = os.path.join(self.audiopath, spect_type, '%s.jpg'%self.audiofilename_without_ext)
if not os.path.isdir(os.path.dirname(targetfilepath)):
os.mkdir(os.path.dirname(targetfilepath))
if not os.path.isdir(os.path.dirname(targetfilepath)):
print('Error! Cannot find the target folder %s.' %os.path.dirname(targetfilepath))
exit()
if (stop - start)/1000*self.sr > (max_sample_size):
if not os.path.exists('tmp'):
try:
os.mkdir('tmp')
except:
print('Cannot create tmp folder!')
exit()
imgs = []
for ts in range(int(round(start/1000*self.sr)), int(round(stop/1000*self.sr)-self.sr*0.1), max_sample_size):
if ts+max_sample_size > round(stop/1000*self.sr):
data = self.audiodata[ts:round(stop/1000*self.sr)+1]
else:
data = self.audiodata[ts:ts+max_sample_size]
try:
imgs.append(self.spectrogram(data, spect_type, rainbow_bands=rainbow_bands))
except:
print('error in converting', tmpimg)
exit()
self.cv2_img = cv2.hconcat(imgs)
else:
self.cv2_img = self.spectrogram(self.audiodata[int(round(start/1000*self.sr)):int(round(stop/1000*self.sr))], spect_type, rainbow_bands=rainbow_bands)
if spect_type == 'rainbow' and rainbow_bands == 5:
self.rainbow_img = cv2.cvtColor(self.cv2_img, cv2.COLOR_RGB2BGR)
height, width, colors = self.cv2_img.shape
#cv2.imwrite(targetfilepath, self.cv2_img)
PILimage = Image.fromarray(self.cv2_img)
try:
PILimage.save(targetfilepath, dpi=(72,72))
except:
targetfilepath = '%spng' %targetfilepath[:-3]
PILimage.save(targetfilepath, dpi=(72,72))
print('Spectrogram was saved to %s.'%targetfilepath)
return targetfilepath
def mel_to_freq(self, mel):
if mel < 0:
return self.fmin
mel = mel*(1127*np.log(1+self.fmax/700)-1127*np.log(1+self.fmin/700)) + 1127*np.log(1+self.fmin/700)
return round((700*(np.exp(mel/1127)-1)).astype('float32'))
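    # Note: mel_to_freq maps a normalized mel coordinate in [0, 1] (0 -> fmin, 1 -> fmax)
    # back to a frequency in Hz using the standard mel formula m = 1127 * ln(1 + f / 700)
    # and its inverse f = 700 * (exp(m / 1127) - 1).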
def xywh2ttff(self, xywh):
x, y, w, h = list(xywh)
ts = round((x-w/2)*self.clip_length)
te = round((x+w/2)*self.clip_length)
fl = self.mel_to_freq(1-(y+h/2))
fh = self.mel_to_freq(1-(y-h/2))
return [ts, te, fl, fh]
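    # Illustrative example (values assumed, not taken from the original code): with
    # clip_length = 3000 ms, fmin = 0 Hz and fmax = 8000 Hz, a normalized YOLO box
    # xywh = (0.5, 0.5, 0.2, 0.4) maps to roughly [1200, 1800, 790, 3385], i.e.
    # 1200-1800 ms in time and about 790-3385 Hz in frequency.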
def detect(self, weights, step=1000, conf_thres=0.1, imgsz=640, targetfilepath=None, iou_thres=0.25, soundclasses=None):
if self.model and self.model_path == weights:
pass
else:
self.model_path = weights
self.model = attempt_load(self.model_path, map_location=self.device)
self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
if soundclasses:
classes = [self.names.index(name) for name in soundclasses]
else:
classes = None
self.tfr(targetfilepath=targetfilepath, spect_type='rainbow')
# prepare input data clips
dataset = []
for ts in range(0, self.duration, step):
clip_start = round(ts/self.duration*self.rainbow_img.shape[1])
clip_end = clip_start+round(self.clip_length/self.duration*self.rainbow_img.shape[1])
if clip_end > self.rainbow_img.shape[1]:
break
img0 = self.rainbow_img[:,clip_start:clip_end]
img = letterbox(img0, new_shape=imgsz)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
dataset.append([os.path.join(self.audiopath, self.audiofilename), img, img0, ts])
labels = [['file', 'classid', "time_begin", "time_end", "freq_low", "freq_high", "score"]]
for path, img, im0, time_start in dataset:
img = torch.from_numpy(img).float().to(self.device)
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
pred = self.model(img, augment=False)[0]
pred = non_max_suppression(pred, conf_thres=conf_thres, iou_thres=iou_thres, classes=classes)
for det in pred: # detections per image
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
for *xyxy, conf, cls in reversed(det):
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
ttff = self.xywh2ttff(xywh)
ts, te, fl, fh = ttff
labels.append([path, self.names[int(cls)], round(time_start+ts), round(time_start+te), fl, fh, round(float(conf),3)])
return labels
def get_iou(bb1, bb2):
"""
© https://github.com/MartinThoma/algorithms/blob/master/CV/IoU/IoU.py
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
    bb1, bb2 : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner
    Returns
    -------
    tuple of float
        (iou, intersection_area / area(bb1), intersection_area / area(bb2)),
        each in [0, 1]
"""
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0, 0.0, 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
    i_ratio_bb1 = intersection_area / bb1_area
    i_ratio_bb2 = intersection_area / bb2_area
    assert iou >= 0.0
    assert iou <= 1.0
    return iou, i_ratio_bb1, i_ratio_bb2
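# Worked example: bb1 = {'x1': 0, 'y1': 0, 'x2': 2, 'y2': 2} and
# bb2 = {'x1': 1, 'y1': 1, 'x2': 3, 'y2': 3} overlap in a unit square, so
# get_iou(bb1, bb2) returns (1/7, i.e. about 0.143, 0.25, 0.25): the IoU plus the
# fraction of each box covered by the intersection.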
def merge_boxes(bb1, bb2):
x1 = bb1['x1']
x2 = bb1['x2']
y1 = bb1['y1']
y2 = bb1['y2']
if bb2['x1'] < bb1['x1']:
x1 = bb2['x1']
if bb2['x2'] > bb1['x2']:
x2 = bb2['x2']
if bb2['y1'] < bb1['y1']:
y1 = bb2['y1']
if bb2['y2'] > bb1['y2']:
y2 = bb2['y2']
return {'x1':x1, 'x2':x2, 'y1':y1, 'y2':y2}
def clean_multi_boxes(labels, threshold_iou=0.25, threshold_iratio=0.9):
df = pd.DataFrame(labels[1:],columns=labels[0])
df = df.sort_values('time_begin')
df_results = | pd.DataFrame() | pandas.DataFrame |
from ast import literal_eval as make_tuple
from itertools import groupby
import pandas as pd
from pm4py.objects.log.log import Trace
from src.encoding.declare.declare_mining import filter_candidates_by_support, generate_train_candidate_constraints, \
transform_results_to_numpy
from src.encoding.declare.declare_templates import template_sizes
def xes_to_positional(log, label=True):
"""
    Convert an XES log into a positional representation:
        {trace_name: {event_a: [1, 2, 3], event_b: [4, 5, 6], ...}, ...}
    where each event label maps to the positions at which it occurs in the trace.
    :param log: XES event log
    :return: dict keyed by trace name
"""
return {
trace.attributes['concept:name']: {
key: [item[0] for item in group]
for key, group in groupby(sorted(enumerate([event['concept:name'] for event in trace]), key=lambda x: x[1]), lambda x: x[1])
}
for trace in log
}
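# Illustrative example: a trace named 't1' whose events are ['A', 'B', 'A'] is
# converted to {'t1': {'A': [0, 2], 'B': [1]}}, i.e. each activity maps to the
# (0-based) positions at which it occurs in the trace.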
def declare_encoding(log, labelling, encoding, additional_columns, cols=None): #TODO JONAS
"""creates and returns the DataFrame encoded using the declare encoding
:param log:
:param labelling:
:param encoding:
:param additional_columns:
:param cols:
:return:
"""
filter_t = True
print("Filter_t", filter_t)
templates = template_sizes.keys()
constraint_threshold = 0.1
candidate_threshold = 0.1
#apply prefix
log = [Trace(trace[:encoding.prefix_length], attributes=trace.attributes) for trace in log]
# Read into suitable data structure
transformed_log = xes_to_positional(log)
labels = {trace.attributes['concept:name']: trace.attributes['label'] for trace in log}
# Extract unique activities from log
events_set = {event_label for tid in transformed_log for event_label in transformed_log[tid]}
# Brute force all possible candidates
if cols is None:
candidates = [(event,) for event in events_set] + [(e1, e2) for e1 in events_set for e2 in events_set if e1 != e2]
else:
candidates = list({
make_tuple(c.split(':')[1]) if len(c.split(':')) > 1 else c
for c in cols
if c not in ['label', 'trace_id']
})
print("Start candidates:", len(candidates))
# Count by class
true_count = len([trace.attributes['concept:name'] for trace in log if trace.attributes['label'] == 'true'])
false_count = len(log) - true_count
print("{} deviant and {} normal traces in set".format(false_count, true_count))
ev_support_true = int(true_count * candidate_threshold)
ev_support_false = int(false_count * candidate_threshold)
if filter_t and cols is None:
print(filter_t)
print("Filtering candidates by support")
candidates = filter_candidates_by_support(candidates, transformed_log, labels, ev_support_true, ev_support_false)
print("Support filtered candidates:", len(candidates))
constraint_support_false = int(false_count * constraint_threshold)
constraint_support_true = int(true_count * constraint_threshold)
train_results = generate_train_candidate_constraints(candidates, templates, transformed_log, labels, constraint_support_true, constraint_support_false, filter_t=filter_t)
print("Candidate constraints generated")
# transform to numpy
# get trace names
data, labels, featurenames, train_names = transform_results_to_numpy(train_results, labels, transformed_log, cols)
df = | pd.DataFrame(data, columns=featurenames) | pandas.DataFrame |
import numpy as np
import pandas as pd
class HMM:
"""
Implementation of Filtering, Smoothing, Decoding(Viterbi) and Prediction
for Hidden Markov Models
"""
def __init__(self, T:np.ndarray, M:np.ndarray, state_list:list, obs_dict:dict):
"""
Parameters:
-----------
T : transition probability matrix
numpy array of shape [d,d] where d is number of states
columns should sum to one
| S_{t-1}[0] | S_{t-1}[1] |
S_{t}[0]|____________|____________|
S_{t}[1]|____________|____________|
M : observation probability matrix
numpy array of shape [d,m] where m is the number of possible observations
rows should sum to one
| O[1] | O[2] | ... | O[m] |
S_{t}[0]|______|______| ... |______|
S_{t}[1]|______|______| ... |______|
state_list : list mapping states to row numbers
obs_dict : dictionary mapping observations to column numbers
Example:
--------
M = np.array([[1/6,1/6,1/6,1/6,1/6,1/6],
[1/10,1/10,1/10,1/10,1/10,1/2]])
T = np.array([[0.95, 0.05],
[0.05, 0.95]])
state_list = ['Fair','Loaded']
        obs_dict = {'1':0, '2': 1, '3':2, '4': 3, '5':4, '6': 5}
"""
self.T = T
self.M = M
self.d = T.shape[0]
self.m = M.shape[1]
self.state_list = state_list
self.obs_dict = obs_dict
def filtering(self, obs:list, init_belief:np.ndarray, normalize:bool=True):
"""
Perform filtering on the given observation sequence
Finds: P(S_t | o_{1:t})
Parameters:
-----------
obs: list of observations
init_belief: np.array of the initial belief of states
normalize: whether to normalize the belief values (to interpret as probabilities)
Example:
--------
obs = ['1','2','4','6','6','6','3','6']
init_belief = np.array([0.8,0.2])
"""
# obs = ['1','2','4','6','6','6','3','6']
assert len(init_belief) == self.d, "Initial belief should be for all possible states"
        assert np.isclose(np.sum(init_belief), 1), "Sum of initial belief should be equal to 1"
p_t = init_belief
P_t = []
P_t.append(init_belief)
        for i in range(len(obs)):
            p_t_non_norm = self.M[:,self.obs_dict[obs[i]]]*self.T.dot(p_t)
            # normalize so the belief can be read as a probability distribution
            if normalize:
                p_t = p_t_non_norm/np.sum(p_t_non_norm)
            else:
                p_t = p_t_non_norm
            P_t.append(p_t)
return np.array(P_t)
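    # Illustrative example: with the dice HMM from the class docstring and
    # init_belief = np.array([0.5, 0.5]), observing '6' twice (with the default
    # normalize=True) gives filtered beliefs of roughly [0.25, 0.75] and then
    # [0.11, 0.89] for (Fair, Loaded).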
def smoothing(self, obs:list, init_belief:np.ndarray, normalize: bool = True):
"""
Perform smoothing on the sequence of observations
Finds: P(S_{k}|o_{1:t}) for k<t
Parameters:
-----------
obs: list of observations
init_belief: np.array of the initial belief of states
normalize: whether to normalize the belief values (to interpret as probabilities)
Example:
--------
obs = ['1','2','4','6','6','6','3','6']
init_belief = np.array([0.8,0.2])
"""
p_hat = self.filtering(obs, init_belief)
b_kt = [] # will add in a reverse manner; have to flip upside down
b_kt.append(np.ones(self.d))
for i in range(len(obs)):
b_mt = (b_kt[i] * self.M[:,self.obs_dict[obs[len(obs)-i-1]]]).dot(self.T) # (bmt*M[:,o]).T
b_kt.append(b_mt)
b_kt = np.flipud(b_kt) # flipping
        p_tilde_non_norm = b_kt*p_hat
        # normalize
        if normalize:
            p_tilde = p_tilde_non_norm/(np.sum(p_tilde_non_norm,axis=1))[:,None]
        else:
            p_tilde = p_tilde_non_norm
        return p_hat, p_tilde
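    # Note: this is the standard forward-backward recursion -- the smoothed belief at
    # step k is proportional to the filtered (forward) estimate times the backward
    # message b_kt accumulated from the observations after k.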
def get_smoothing_table(self, obs:list, init_belief:np.ndarray, normalize:bool=True):
"""
Perform smoothing and filtering on the sequence of observations
and print a table
Parameters:
-----------
obs: list of observations
init_belief: np.array of the initial belief of states
normalize: whether to normalize the belief values (to interpret as probabilities)
Example:
--------
obs = ['1','2','4','6','6','6','3','6']
init_belief = np.array([0.8,0.2])
"""
p_hat, p_tilde = self.smoothing(obs,init_belief, normalize)
p_hat_r = np.around(p_hat,4)
p_tilde_r = np.around(p_tilde,4)
df = | pd.DataFrame() | pandas.DataFrame |
def update_rel_frame_time(org_frame_time, duration):
return round(org_frame_time - duration, 7)
def replace_src_with_dst(col_name):
if 'src' in col_name:
col_name = col_name.replace('src', 'dst')
else:
if 'dst' in col_name:
col_name = col_name.replace('dst', 'src')
return col_name
def preprocess_client_dsp_data(csv_file_path):
import pandas as pd
client_data = | pd.read_csv(csv_file_path) | pandas.read_csv |
import datetime
import logging
import os
import pandas as pd
from ..models.order import Order
from ..models.price import Price
from ..models.dealer import Dealer
from yahooquery import Ticker
from pandas import DataFrame
class YQBroker(Dealer):
cache_file: str = '../data/yq_broker_data.csv'
ticker: Ticker
def __init__(self):
self.historical_data = DataFrame()
def get_symbol(self, asset):
if type(asset) is str:
return asset + '.sa'
return [a + '.sa' for a in asset]
def get_asset_balance(self, symbols):
logging.warning("get_asset_balance not implemented for YQBroker.")
if type(symbols) is str:
return 0
return len(symbols) * [0]
def symbol_ticker(self, symbols):
logging.warning("symbol_ticker not implemented for YQBroker.")
if type(symbols) is str:
return Price()
return [Price() for _ in range(len(symbols))]
def symbol_ticker_candle(self, symbols, interval: int):
logging.warning("symbol_ticker_candle not implemented for YQBroker.")
pass
def check_symbol_history(self, symbol, start: datetime, end=None):
print(symbol)
logging.info("checking history for: %s", symbol)
symbol_data = self.historical_data[self.historical_data.symbol == self.get_symbol(symbol)]
if len(symbol_data) == 0:
logging.info("downloading history for: %s", symbol)
data = Ticker(symbol).history(start=start)
data.to_csv('tmp')
data = pd.read_csv('tmp')
data['date'] = pd.to_datetime(data['date'])
self.historical_data = pd.concat([self.historical_data, data], ignore_index=True)
print(self.historical_data)
def download_history(self, symbols, start: datetime, end=None, interval='1d'):
self.ticker = Ticker(self.get_symbol(symbols))
if os.path.isfile(self.cache_file):
logging.info("loading from cache file: %s", self.cache_file)
self.historical_data = pd.read_csv(self.cache_file)
for s in self.get_symbol(symbols):
self.check_symbol_history(s, start)
self.historical_data.to_csv(self.cache_file)
else:
logging.info("downloading history for: {}", symbols)
self.historical_data = self.ticker.history(start=start)
logging.info(" storing cache into %s", self.cache_file)
self.historical_data.to_csv(self.cache_file)
logging.info("... loaded %d lines.", len(self.historical_data))
if len(self.historical_data) == 0:
logging.error("empty historical_cache after download.")
self.historical_data = pd.read_csv(self.cache_file)
# convert date to datetime
self.historical_data['date'] = | pd.to_datetime(self.historical_data['date']) | pandas.to_datetime |
import ccxt
import config
import schedule
import pandas as pd
import numbers
pd.set_option('display.max_rows', None)
import warnings
warnings.filterwarnings('ignore')
from datetime import datetime
import time
from stockstats import StockDataFrame as Sdf
# how much quote currency (e.g. DOGE) you want to spend on every trade
BALANCE = 0.5
# what percentage you want to earn on every trade
PERCENT_OF_GAIN_IN_FIAT_CURRENCY = 10
# the crypto pair that you want to trade
symbol = 'BNB/USDT'
# RSI parameters
RSI_OVERBOUGHT = 70
RSI_OVERSOLD = 30
# trade mode
TRADE = True
EXCHANGE = ccxt.binance({
'options': {
'adjustForTimeDifference': True,
},
'enableRateLimit': True,
"apiKey": config.BINANCE_API_KEY,
"secret": config.BINANCE_SECRET_KEY,
})
EXCHANGE.load_markets()
def create_stock(historical_data):
stock = Sdf.retype(historical_data)
return stock
def in_position(amount, price, limits, precision):
global symbol
condition = limits['amount']['min'] <= amount <= limits['amount']['max'] and limits['price'][
'min'] <= price <= limits['price']['max'] and precision['price'] >= float(
EXCHANGE.price_to_precision(symbol, price)) and (amount * price) >= limits['cost']['min'] and not isinstance(
limits['cost']['max'], numbers.Number) or (
isinstance(limits['cost']['max'], numbers.Number) and (amount * price) <= limits['cost']['max'])
return condition
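# Note: in_position is intended to verify that a prospective order respects the
# exchange limits reported by ccxt for the symbol: order amount and price within
# their min/max bounds, price compatible with the market's precision, and the
# notional value (amount * price) within the cost limits when the exchange
# defines them.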
def info(df):
data = dict()
online_balance = EXCHANGE.fetchBalance()
data["quote"] = online_balance['total'][symbol.split("/")[0]]
data["price"] = df['close'][len(df) - 1]
limits = EXCHANGE.markets[symbol]['limits']
precision = EXCHANGE.markets[symbol]['precision']
data["sell_fees"] = EXCHANGE.markets[symbol]['taker']
data["buy_fees"] = EXCHANGE.markets[symbol]['maker']
data["fiat"] = float((online_balance['total'][symbol.split("/")[1]] / data["price"]) * (1 - data["buy_fees"]))
if data["fiat"] >= BALANCE:
data["fiat"] = BALANCE
data["in_position_to_buy"] = in_position(data["fiat"], data["price"], limits, precision)
data["in_position_to_sell"] = in_position(data["quote"], data["price"], limits, precision)
return data
def sell(data):
print(f"before sell {data}")
    # try to find open trades and sell them if the sell price is good and does not incur any charges
try:
trade_history = | pd.read_csv("trades.csv") | pandas.read_csv |
# gpu_id = None
# if len(sys.argv) == 2:
# gpu_id = sys.argv[1]
# if not gpu_id:
# raise Exception('insert gpu_id')
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#
# os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
import sys
sys.path.append('../')
from wrappers.bioc_wrapper import bioc_to_docs, bioc_to_relevances
from wrappers.pandas_wrapper import relevances_to_pandas, docs_to_pandasdocs
from mlearning.dl import DL_preprocessing
from mlearning.dl_models import Burns_CNN, Burns_LSTM, Chollet_DNN, DNN, Burns_CNN2, Burns_BiLSTM, Burns_CNN3, Burns_CNNBiLSTM
from mlearning.dl_models import Hierarchical_Attention, Hierarchical_Attention_v2, DeepDTA
from mlearning.embeddings import compute_embedding_matrix, glove_embeddings_2
from mlearning.ml import serialize_config, deserialize_config
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
from mlearning.dl_config import DLConfig
from tensorflow.keras.preprocessing import text
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.corpus import stopwords
import seaborn as sns
from openpyxl import load_workbook
import pandas as pd
import os
from keras import backend as K
import tensorflow as tf
from tfdeterminism import patch
patch()
# gpu_id = None
# if len(sys.argv) == 2:
# gpu_id = sys.argv[1]
# if not gpu_id:
# raise Exception('insert gpu_id')
# gpus = tf.config.experimental.list_physical_devices('GPU')
seed_value= 123123
os.environ['pythongpu'] = str(seed_value)
tf.random.set_seed(seed_value)
# with K.tf.device(gpu):
# config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=4,\
# inter_op_parallelism_threads=4, allow_soft_placement=True,\
# device_count = {'CPU' : 1, 'GPU' : 1})
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
# K.set_session(session)
from keras import backend as K
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.compat.v1.Session(config=config)
K.set_session(sess)
#Parameters
model_name= 'deepdta'
stop_words = set(stopwords.words('english')) #####
#stop_words = None
lower = True #####
remove_punctuation = False
split_by_hyphen = True
lemmatization = False #####
stems = False #####
padding = 'pre' #'pre' -> default; 'post' -> alternative
truncating = 'pre' #'pre' -> default; 'post' -> alternative #####
oov_token = 'OOV'
epochs = 20
batch_size = 64 # also try increasing the batch size
learning_rate = 0.0001 # try decreasing
max_sent_len = 400 #sentences will have a maximum of "max_sent_len" words #400/500
max_nb_words = 100_000 #it will only be considered the top "max_nb_words" words in the dataset
max_nb_sentences = None # set only for the hierarchical attention model!!!
embeddings = 'pubmed_pmc'
validation_percentage = 10
if embeddings == 'glove':
embedding_path = 'D:/desktop/tese/embeddings/glove/glove.6B.300d.txt'
#embedding_path = '/home/malves/embeddings/glove/glove.840B.300d.txt'
embedding_dim = 300
embedding_format = 'glove'
elif embeddings == 'biowordvec':
#BioWordVec_extrinsic #lowercase #200dimensions
embedding_path = 'D:/desktop/tese/embeddings/biowordvec/bio_embedding_extrinsic'
#embedding_path = '/home/malves/embeddings/biowordvec/bio_embedding_extrinsic'
embedding_dim = 200
embedding_format = 'word2vec'
elif embeddings == 'pubmed_pmc': #200 dimensions
embedding_path = 'D:/desktop/tese/embeddings/pubmed_pmc/PubMed-and-PMC-w2v.bin'
#embedding_path = '/home/malves/embeddings/pubmed_pmc/PubMed-and-PMC-w2v.bin'
embedding_dim = 200
embedding_format = 'word2vec'
else:
raise Exception("Please Insert Embeddings Type")
es_patience = 30 #early-stopping patience
# keras_callbacks = [
# EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=es_patience),
# ModelCheckpoint('best_model.h5', monitor='val_f1_score', mode='max', verbose=1, save_best_only=True)
# ]
keras_callbacks = None
train_dataset_path = '../datasets/PMtask_Triage_TrainingSet.xml'
test_dataset_path = '../datasets/PMtask_Triage_TestSet.xml'
output_excel = 'metrics/results_' + model_name + '.xlsx'
#Pipeline
#Load Data
docs_train = bioc_to_docs(train_dataset_path, stop_words=stop_words, lower=lower, remove_punctuation=remove_punctuation,
split_by_hyphen=split_by_hyphen, lemmatization=lemmatization, stems=stems)
relevances_train = bioc_to_relevances(train_dataset_path, 'protein-protein')
x_train = docs_to_pandasdocs(docs_train)
y_train = relevances_to_pandas(x_train, relevances_train)
#Preprocessing for Training Data
path = '../models/configs/' + model_name + '.txt'
tokenizer = text.Tokenizer(num_words=max_nb_words, oov_token=oov_token)
our_sent = tokenizer.texts_to_sequences([x_train['Document'][0].fulltext_string])
for i, tok in enumerate(x_train['Document'][0].fulltext_tokens):
print(i, ': ', tok)
dl_config = DLConfig(path=path, tokenizer=tokenizer, max_sent_len=max_sent_len, max_nb_sentences=max_nb_sentences,
embedding_dim=embedding_dim, embedding_path=embedding_path, max_nb_words=max_nb_words)
# x_train, y_train, x_val, y_val = DL_preprocessing(x_train, y_train, dl_config, set='train',
# validation_percentage = validation_percentage, seed_value=seed_value,
# padding=padding, truncating=truncating)
x_train_title, x_train_abstract, y_train, x_val_title, x_val_abstract, y_val = DL_preprocessing(x_train, y_train, dl_config, set='train',
validation_percentage = validation_percentage, seed_value=seed_value,
padding=padding, truncating=truncating, model='DeepDTA')
embedding_matrix = compute_embedding_matrix(dl_config, embeddings_format = embedding_format)
#Deep Learning models
#model = Hierarchical_Attention(embedding_matrix, dl_config, seed_value=seed_value)
#model = Hierarchical_Attention_v2(embedding_matrix, dl_config, seed_value=seed_value)
#model = Burns_CNNBiLSTM(embedding_matrix, dl_config, seed_value=seed_value)
#model = Burns_CNN(embedding_matrix, dl_config, seed_value=seed_value)
model = DeepDTA(embedding_matrix, dl_config)
# history = model.fit(x_train, y_train,
# epochs=epochs,
# batch_size=batch_size,
# validation_data=(x_val, y_val),
# callbacks=keras_callbacks)
history = model.fit(([x_train_title, x_train_abstract]), y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=([x_val_title, x_val_abstract], y_val),
callbacks=keras_callbacks)
dl_config.classifier = model
#clf = load_model('best_model.h5')
#EVALUATION
#_, train_acc, train_f1_score = model.evaluate(x_train, y_train, verbose=0)
_, train_acc, train_f1_score = model.evaluate([x_train_title, x_train_abstract], y_train, verbose=0)
print('Training Accuracy: %.3f' % (train_acc))
print('Training F1_score: %.3f' % (train_f1_score))
print(history.history)
if os.path.exists(output_excel):
reader = | pd.read_excel('metrics/results_'+model_name+'.xlsx') | pandas.read_excel |
"""Functions to calculate mean squared displacements from trajectory data
This module includes functions to calculate mean squared displacements and
additional measures from input trajectory datasets as calculated by the
Trackmate ImageJ plugin.
"""
import warnings
import random as rand
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.stats as stats
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import diff_classifier.aws as aws
def nth_diff(dataframe, n=1, axis=0):
"""Calculates the nth difference between vector elements
Returns a new vector of size N - n containing the nth difference between
vector elements.
Parameters
----------
dataframe : pandas.core.series.Series of int or float
Input data on which differences are to be calculated.
n : int
Function calculated xpos(i) - xpos(i - n) for all values in pandas
series.
axis : {0, 1}
Axis along which differences are to be calculated. Default is 0. If 0,
input must be a pandas series. If 1, input must be a numpy array.
Returns
-------
diff : pandas.core.series.Series of int or float
Pandas series of size N - n, where N is the original size of dataframe.
Examples
--------
    >>> df = np.ones((5, 10))
    >>> nth_diff(df, axis=1)
    array([[0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
    >>> df = np.ones((5, 10))
    >>> nth_diff(df)
    array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
"""
assert isinstance(n, int), "n must be an integer."
if dataframe.ndim == 1:
length = dataframe.shape[0]
if n <= length:
test1 = dataframe[:-n].reset_index(drop=True)
test2 = dataframe[n:].reset_index(drop=True)
diff = test2 - test1
else:
diff = np.array([np.nan, np.nan])
else:
length = dataframe.shape[0]
if n <= length:
if axis == 0:
test1 = dataframe[:-n, :]
test2 = dataframe[n:, :]
else:
test1 = dataframe[:, :-n]
test2 = dataframe[:, n:]
diff = test2 - test1
else:
diff = np.array([np.nan, np.nan])
return diff
def msd_calc(track, length=10):
"""Calculates mean squared displacement of input track.
Returns numpy array containing MSD data calculated from an individual track.
Parameters
----------
track : pandas.core.frame.DataFrame
Contains, at a minimum a 'Frame', 'X', and 'Y' column
Returns
-------
new_track : pandas.core.frame.DataFrame
Similar to input track. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [1, 2, 3, 4, 5],
... 'X': [5, 6, 7, 8, 9],
... 'Y': [6, 7, 8, 9, 10]}
>>> df = pd.DataFrame(data=data1)
>>> new_track = msd.msd_calc(df, 5)
>>> data1 = {'Frame': [1, 2, 3, 4, 5],
... 'X': [5, 6, 7, 8, 9],
... 'Y': [6, 7, 8, 9, 10]}
>>> df = pd.DataFrame(data=data1)
>>> new_track = msd.msd_calc(df)
"""
meansd = np.zeros(length)
gauss = np.zeros(length)
new_frame = np.linspace(1, length, length)
old_frame = track['Frame']
oldxy = [track['X'], track['Y']]
fxy = [interpolate.interp1d(old_frame, oldxy[0], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[1], bounds_error=False,
fill_value=np.nan)]
intxy = [ma.masked_equal(fxy[0](new_frame), np.nan),
ma.masked_equal(fxy[1](new_frame), np.nan)]
data1 = {'Frame': new_frame,
'X': intxy[0],
'Y': intxy[1]
}
new_track = pd.DataFrame(data=data1)
for frame in range(0, length-1):
xy = [np.square(nth_diff(new_track['X'], n=frame+1)),
np.square(nth_diff(new_track['Y'], n=frame+1))]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
meansd[frame+1] = np.nanmean(xy[0] + xy[1])
gauss[frame+1] = np.nanmean(xy[0]**2 + xy[1]**2
)/(2*(meansd[frame+1]**2))
new_track['MSDs'] = pd.Series(meansd, index=new_track.index)
new_track['Gauss'] = pd.Series(gauss, index=new_track.index)
return new_track
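# Worked example: for the docstring track above (unit steps in both X and Y each
# frame), the displacement after a lag of n frames is (n, n), so the returned
# MSDs column equals 2 * n**2, i.e. [0, 2, 8, 18, 32] for the 5-frame track.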
def all_msds(data):
"""Calculates mean squared displacements of a trajectory dataset
Returns numpy array containing MSD data of all tracks in a trajectory
pandas dataframe.
Parameters
----------
data : pandas.core.frame.DataFrame
Contains, at a minimum a 'Frame', 'Track_ID', 'X', and
'Y' column. Note: it is assumed that frames begins at 1, not 0 with this
function. Adjust before feeding into function.
Returns
-------
new_data : pandas.core.frame.DataFrame
Similar to input data. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
... 'Track_ID': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
... 'X': [5, 6, 7, 8, 9, 1, 2, 3, 4, 5],
... 'Y': [6, 7, 8, 9, 10, 2, 3, 4, 5, 6]}
>>> df = pd.DataFrame(data=data1)
>>> all_msds(df)
"""
trackids = data.Track_ID.unique()
partcount = trackids.shape[0]
length = int(max(data['Frame']))
new = {}
new['length'] = partcount*length
new['frame'] = np.zeros(new['length'])
new['ID'] = np.zeros(new['length'])
new['xy'] = [np.zeros(new['length']),
np.zeros(new['length'])]
meansd = np.zeros(new['length'])
gauss = np.zeros(new['length'])
for particle in range(0, partcount):
single_track = data.loc[data['Track_ID'] ==
trackids[particle]
].sort_values(['Track_ID', 'Frame'],
ascending=[1, 1]
).reset_index(drop=True)
if particle == 0:
index1 = 0
index2 = length
else:
index1 = index2
index2 = index2 + length
new['single_track'] = msd_calc(single_track, length=length)
new['frame'][index1:index2] = np.linspace(1, length, length)
new['ID'][index1:index2] = particle+1
new['xy'][0][index1:index2] = new['single_track']['X']
new['xy'][1][index1:index2] = new['single_track']['Y']
meansd[index1:index2] = new['single_track']['MSDs']
gauss[index1:index2] = new['single_track']['Gauss']
data1 = {'Frame': new['frame'],
'Track_ID': new['ID'],
'X': new['xy'][0],
'Y': new['xy'][1],
'MSDs': meansd,
'Gauss': gauss}
new_data = pd.DataFrame(data=data1)
return new_data
def make_xyarray(data, length=651):
"""Rearranges xy position data into 2d arrays
Rearranges xy data from input pandas dataframe into 2D numpy array.
Parameters
----------
data : pd.core.frame.DataFrame
Contains, at a minimum a 'Frame', 'Track_ID', 'X', and
'Y' column.
length : int
Desired length or number of frames to which to extend trajectories.
Any trajectories shorter than the input length will have the extra space
filled in with NaNs.
Returns
-------
xyft : dict of np.ndarray
Dictionary containing xy position data, frame data, and trajectory ID
data. Contains the following keys:
farray, frames data (length x particles)
tarray, trajectory ID data (length x particles)
xarray, x position data (length x particles)
yarray, y position data (length x particles)
Examples
--------
>>> data1 = {'Frame': [0, 1, 2, 3, 4, 2, 3, 4, 5, 6],
... 'Track_ID': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
... 'X': [5, 6, 7, 8, 9, 1, 2, 3, 4, 5],
... 'Y': [6, 7, 8, 9, 10, 2, 3, 4, 5, 6]}
>>> df = pd.DataFrame(data=data1)
>>> length = max(df['Frame']) + 1
>>> xyft = msd.make_xyarray(df, length=length)
{'farray': array([[0., 0.],
[1., 1.],
[2., 2.],
[3., 3.],
[4., 4.],
[5., 5.],
[6., 6.]]),
'tarray': array([[1., 2.],
[1., 2.],
[1., 2.],
[1., 2.],
[1., 2.],
[1., 2.],
[1., 2.]]),
    'xarray': array([[ 5., nan],
           [ 6., nan],
           [ 7.,  1.],
           [ 8.,  2.],
           [ 9.,  3.],
           [nan,  4.],
           [nan,  5.]]),
    'yarray': array([[ 6., nan],
           [ 7., nan],
           [ 8.,  2.],
           [ 9.,  3.],
           [10.,  4.],
           [nan,  5.],
           [nan,  6.]])}
"""
# Initial values
first_p = int(min(data['Track_ID']))
particles = int(max(data['Track_ID'])) - first_p + 1
xyft = {}
xyft['xarray'] = np.zeros((length, particles))
xyft['yarray'] = np.zeros((length, particles))
xyft['farray'] = np.zeros((length, particles))
xyft['tarray'] = np.zeros((length, particles))
xyft['qarray'] = np.zeros((length, particles))
xyft['snarray'] = np.zeros((length, particles))
xyft['iarray'] = np.zeros((length, particles))
track = data[data['Track_ID'] == first_p
].sort_values(['Track_ID', 'Frame'],
ascending=[1, 1]).reset_index(drop=True)
new_frame = np.linspace(0, length-1, length)
old_frame = track['Frame'].values.astype(float)
oldxy = [track['X'].values,
track['Y'].values,
track['Quality'].values,
track['SN_Ratio'].values,
track['Mean_Intensity'].values]
fxy = [interpolate.interp1d(old_frame, oldxy[0], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[1], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[2], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[3], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[4], bounds_error=False,
fill_value=np.nan)]
intxy = [fxy[0](new_frame), fxy[1](new_frame), fxy[2](new_frame),
fxy[3](new_frame), fxy[4](new_frame)]
# Fill in entire array
xyft['xarray'][:, 0] = intxy[0]
xyft['yarray'][:, 0] = intxy[1]
xyft['farray'][:, 0] = new_frame
xyft['tarray'][:, 0] = first_p
xyft['qarray'][:, 0] = intxy[2]
xyft['snarray'][:, 0] = intxy[3]
xyft['iarray'][:, 0] = intxy[4]
for part in range(first_p+1, first_p+particles):
track = data[data['Track_ID'] == part
].sort_values(['Track_ID', 'Frame'],
ascending=[1, 1]).reset_index(drop=True)
old_frame = track['Frame']
oldxy = [track['X'].values,
track['Y'].values,
track['Quality'].values,
track['SN_Ratio'].values,
track['Mean_Intensity'].values]
fxy = [interpolate.interp1d(old_frame, oldxy[0], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[1], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[2], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[3], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[4], bounds_error=False,
fill_value=np.nan)]
intxy = [fxy[0](new_frame), fxy[1](new_frame), fxy[2](new_frame),
fxy[3](new_frame), fxy[4](new_frame)]
xyft['xarray'][:, part-first_p] = intxy[0]
xyft['yarray'][:, part-first_p] = intxy[1]
xyft['farray'][:, part-first_p] = new_frame
xyft['tarray'][:, part-first_p] = part
xyft['qarray'][:, part-first_p] = intxy[2]
xyft['snarray'][:, part-first_p] = intxy[3]
xyft['iarray'][:, part-first_p] = intxy[4]
return xyft
def all_msds2(data, frames=651):
"""Calculates mean squared displacements of input trajectory dataset
Returns numpy array containing MSD data of all tracks in a trajectory pandas
dataframe.
Parameters
----------
data : pandas.core.frame.DataFrame
Contains, at a minimum a 'Frame', 'Track_ID', 'X', and
'Y' column. Note: it is assumed that frames begins at 0.
Returns
-------
new_data : pandas.core.frame.DataFrame
Similar to input data. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [0, 1, 2, 3, 4, 0, 1, 2, 3, 4],
... 'Track_ID': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
... 'X': [5, 6, 7, 8, 9, 1, 2, 3, 4, 5],
... 'Y': [6, 7, 8, 9, 10, 2, 3, 4, 5, 6]}
>>> df = pd.DataFrame(data=data1)
>>> cols = ['Frame', 'Track_ID', 'X', 'Y', 'MSDs', 'Gauss']
    >>> length = max(df['Frame']) + 1
>>> msd.all_msds2(df, frames=length)[cols]
"""
if data.shape[0] > 2:
try:
xyft = make_xyarray(data, length=frames)
length = xyft['xarray'].shape[0]
particles = xyft['xarray'].shape[1]
meansd = np.zeros((length, particles))
gauss = np.zeros((length, particles))
for frame in range(0, length-1):
xpos = np.square(nth_diff(xyft['xarray'], n=frame+1))
ypos = np.square(nth_diff(xyft['yarray'], n=frame+1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
meansd[frame+1, :] = np.nanmean(xpos + ypos, axis=0)
gauss[frame+1, :] = np.nanmean(xpos**2 + ypos**2, axis=0
)/(2*(meansd[frame+1]**2))
data1 = {'Frame': xyft['farray'].flatten('F'),
'Track_ID': xyft['tarray'].flatten('F'),
'X': xyft['xarray'].flatten('F'),
'Y': xyft['yarray'].flatten('F'),
'MSDs': meansd.flatten('F'),
'Gauss': gauss.flatten('F'),
'Quality': xyft['qarray'].flatten('F'),
'SN_Ratio': xyft['snarray'].flatten('F'),
'Mean_Intensity': xyft['iarray'].flatten('F')}
new_data = | pd.DataFrame(data=data1) | pandas.DataFrame |
import datetime
import json
import os.path
import pandas as pd
import numpy as np
import folium
from folium import plugins
from branca.element import MacroElement
from jinja2 import Template
from flask import Flask, Response
app = Flask(__name__)
app.config.from_object(__name__)
class FloatMacro(MacroElement):
"""Adds a floating image in HTML canvas on top of the map."""
_template = Template("""
{% macro header(this,kwargs) %}
<style>
#{{this.get_name()}} {
position:absolute;
left:{{this.left}}%;
top:{{this.left}}%;
}
</style>
{% endmacro %}
{% macro html(this,kwargs) %}
<img id="{{this.get_name()}}" alt="float_image"
src="{{ this.image }}"
width="{{ this.width }}"
style="z-index: 999999">
</img>
{% endmacro %}
""")
def __init__(self, image, top=75, left=75, width=75):
super(FloatMacro, self).__init__()
self._name = 'FloatImage'
self.image = image
self.top = top
self.left = left
self.width = width
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
def get_file(filename): # pragma: no cover
try:
src = os.path.join(root_dir(), filename)
# Figure out how flask returns static files
# Tried:
# - render_template
# - send_file
# This should not be so non-obvious
return open(src).read()
except IOError as exc:
return str(exc)
def load(path='data/Location History.short.json'):
with open(path, 'r') as fh:
raw = json.loads(fh.read())
# use location_data as an abbreviation for location data
location_data = | pd.DataFrame(raw['locations']) | pandas.DataFrame |
import pandas as pd
import requests
import sys
import os
import urllib3
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import utils.general_utils as general_utils
# import strava_analysis.utils.general_utils as general_utils
def get_updated_access_token(refresh_token,client_id,client_secret):
"""[Strava API requires an access token which needs to be refreshed frequently. In
order to circumvent this, we use the static refresh token, to generate a new
access token each time more data is pulled.
Helpful video for how to do this: https://www.youtube.com/watch?v=sgscChKfGyg ]
"""
# blocks error messages
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# link to go from refresh token --> access token (the later is needed to actually get the data)
auth_url = "https://www.strava.com/oauth/token"
payload = {
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'grant_type': "refresh_token",
'f': 'json'
}
print("Requesting Token...\n")\
res = requests.post(auth_url, data=payload, verify=False)
access_token = res.json()['access_token']
print("Access Token = {}\n".format(access_token))
return access_token
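# Illustrative usage (all three arguments are placeholders for your own Strava app
# credentials, not real values):
# access_token = get_updated_access_token(refresh_token="<refresh_token>",
#                                          client_id="<client_id>",
#                                          client_secret="<client_secret>")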
def get_activities(access_token):
"""[API call to get list of all activities and their information, converts
JSON to df, saves df as csv in data folder]
Args:
access_token ([str]): [updated access token]
"""
# api request to get activities
activites_url = "https://www.strava.com/api/v3/athlete/activities"
header = {'Authorization': 'Bearer ' + access_token}
param = {'per_page': 200, 'page': 1}
my_dataset = requests.get(activites_url, headers=header, params=param).json()
#convert json to pandas df
df = | pd.json_normalize(my_dataset) | pandas.json_normalize |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import lrange, lzip, range
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas.util.testing as tm
def test_equals(idx):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(idx):
# GH9947, GH10637
index_a = idx
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
| tm.assert_numpy_array_equal(index_a == index_a, expected1) | pandas.util.testing.assert_numpy_array_equal |
import pandas as pd
import numpy as np
import datetime
import os
def construct_weather_data(response, station, cols) -> pd.DataFrame:
timestamps = sorted(response.data.keys())
d = {}
d["time"] = timestamps
for col in cols:
print(col)
values = []
for t in timestamps:
values.append(response.data[t][station][col]["value"])
d[col] = values
return pd.DataFrame.from_dict(d)
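# Note: `response` is assumed to be an observation object whose `data` attribute is
# keyed first by timestamp, then by station name, then by parameter name, with each
# leaf a dict carrying a 'value' entry -- this is the layout consumed above.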
# placeholder for data preprocessing function
def preprocess(weather):
for col in ["Air temperature", "Wind speed", "Wind direction", "Wind gust", "Humidity", "Dew point"]:
weather[col] = weather[col].interpolate().bfill()
weather = parse_timestamps(weather)
print(weather.head())
# here model was trained on finnish data
weather = weather.rename(columns={
"Air temperature": "Ilman lämpötila (degC)",
"Wind speed": "Tuulen nopeus (m/s)",
"Wind direction": "Tuulen suunta (deg)",
"Wind gust": "Puuskanopeus (m/s)",
"Humidity": "Suhteellinen kosteus (%)",
"Dew point": "Kastepistelämpötila (degC)"
})
return weather
def parse_timestamps(df):
# print(df["time"].iloc[0].timetuple())
df = df.assign(year=[x.timetuple().tm_year for x in df['time']])
df = df.assign(yday=[x.timetuple().tm_yday for x in df['time']])
df = df.assign(day=[x.timetuple().tm_mday for x in df['time']])
df = df.assign(month=[x.timetuple().tm_mon for x in df['time']])
df = df.assign(wday=[x.timetuple().tm_wday for x in df['time']])
df = df.assign(hour=[x.timetuple().tm_hour for x in df['time']])
df['week'] = df.apply(lambda row: row.time.isocalendar()[1], axis=1)
# placeholder values (not done yet)
df["id"] = np.repeat(1, len(df))
df["yhour"] = np.repeat(1, len(df))
print(df.head())
return df
def predict(config, model):
if isinstance(config, dict):
config = | pd.DataFrame(config) | pandas.DataFrame |
from __future__ import division
import json
import numpy as np
import pandas as pd
from scipy import stats
from visigoth.stimuli import Point, Points, PointCue, Pattern
from visigoth import (AcquireFixation, AcquireTarget,
flexible_values, limited_repeat_sequence)
def define_cmdline_params(self, parser):
parser.add_argument("--timing", default=1, type=float)
def create_stimuli(exp):
# Fixation point
fix = Point(exp.win,
exp.p.fix_pos,
exp.p.fix_radius,
exp.p.fix_trial_color)
# Spatial cue
cue = PointCue(exp.win,
exp.p.cue_norm,
exp.p.cue_radius,
exp.p.cue_color)
# Saccade targets
targets = Points(exp.win,
exp.p.target_pos,
exp.p.target_radius,
exp.p.target_color)
# Average of multiple sinusoidal grating stimulus
pattern = Pattern(exp.win,
n=exp.p.stim_gratings,
elementTex=exp.p.stim_tex,
elementMask=exp.p.stim_mask,
sizes=exp.p.stim_size,
sfs=exp.p.stim_sf,
pos=(0, 0)
)
return locals()
def generate_trials(exp):
"""Yield trial and pulse train info."""
# We need special logic to scheudule the final trial
# given the variability of trial durations.
finished = False
# Create a generator to control cue position repeats
cue_positions = list(range(len(exp.p.stim_pos)))
cue_pos_gen = limited_repeat_sequence(cue_positions,
exp.p.stim_pos_max_repeat)
# Create an infinite iterator for trial data
for t in exp.trial_count():
# Get the current time
now = exp.clock.getTime()
# Check whether we have performed the final trial of the run
if finished or now > (exp.p.run_duration - exp.p.finish_min):
            return  # end the generator (PEP 479: raising StopIteration inside a generator is an error)
# Sample parameters for the next trial and check constraints
attempts = 0
while True:
# Allow experimenter to break if we get stuck here
exp.check_abort()
# Check if we've blown through the final trial window
if exp.clock.getTime() > exp.p.run_duration:
                return  # end the generator (see note above)
# Increment the counter of attempts to find a good trial
attempts += 1
# Sample parameters for a trial
t_info, p_info = generate_trial_info(exp, t, cue_pos_gen)
# Calculate how long the trial will take
trial_dur = (t_info["wait_iti"]
+ t_info["wait_pre_stim"]
+ t_info["pulse_train_dur"]
+ 1)
finish_time = exp.p.run_duration - (now + trial_dur)
# Reject if the next trial is too long
if finish_time < exp.p.finish_min:
# Make a number of attempts to find a trial that finishes with
# enough null time at the end of the run
if attempts < 50:
continue
# If we are having a hard time scheduling a trial that gives
# enough null time, relax our criterion to get a trial that
# just finishes before the scanner does
if finish_time < 0:
continue
# Check if next trial will end in the finish window
if finish_time < (exp.p.finish_max * exp.p.timing):
finished = True
# Use these parameters for the next trial
break
yield t_info, p_info
def generate_trial_info(exp, t, cue_pos_gen):
# Schedule the next trial
wait_iti = flexible_values(exp.p.wait_iti)
if t == 1:
# Handle special case of first trial
if exp.p.skip_first_iti:
wait_iti = 0
else:
# Handle special case of early fixbreak on last trial
last_t_info = exp.trial_data[-1][0]
if last_t_info.fixbreak_early:
if exp.p.wait_iti_early_fixbreak is not None:
wait_iti = exp.p.wait_iti_early_fixbreak
# Determine the stimulus parameters for this trial
cue_pos = next(cue_pos_gen)
gen_dist = flexible_values(list(range(len(exp.p.dist_means))))
gen_mean = exp.p.dist_means[gen_dist]
gen_sd = exp.p.dist_sds[gen_dist]
target = exp.p.dist_targets[gen_dist]
trial_info = exp.trial_info(
# Stimulus parameters
cue_pos=cue_pos,
gen_dist=gen_dist,
gen_mean=gen_mean,
gen_sd=gen_sd,
target=target,
# Pulse info (filled in below)
log_contrast_mean=np.nan,
pulse_count=np.nan,
pulse_train_dur=np.nan,
# Timing parameters
wait_iti=wait_iti,
wait_pre_stim=flexible_values(exp.p.wait_pre_stim) * exp.p.timing,
wait_resp=flexible_values(exp.p.wait_resp),
wait_feedback=flexible_values(exp.p.wait_feedback),
# Track fixbreaks before pulses
fixbreak_early=np.nan,
# Achieved timing data
onset_fix=np.nan,
offset_fix=np.nan,
onset_cue=np.nan,
offset_cue=np.nan,
onset_targets=np.nan,
onset_feedback=np.nan,
)
t_info = pd.Series(trial_info, dtype=np.object)
p_info = generate_pulse_info(exp, t_info)
# Insert trial-level information determined by pulse schedule
t_info["log_contrast_mean"] = p_info["log_contrast"].mean()
t_info["trial_llr"] = p_info["pulse_llr"].sum()
t_info["pulse_count"] = len(p_info)
t_info["pulse_train_dur"] = (p_info["gap_dur"].sum()
+ p_info["pulse_dur"].sum())
return t_info, p_info
def generate_pulse_info(exp, t_info):
"""Generate the pulse train for a given trial."""
rng = np.random.RandomState()
# Randomly sample the pulse count for this trial
if rng.rand() < exp.p.pulse_single_prob:
count = 1
else:
count = int(flexible_values(exp.p.pulse_count, random_state=rng,
max=exp.p.pulse_count_max))
# Account for the duration of each pulse
pulse_dur = flexible_values(exp.p.pulse_dur, count, rng)
total_pulse_dur = np.sum(pulse_dur)
# Randomly sample gap durations with a constraint on trial duration
train_dur = np.inf
while train_dur > (exp.p.pulse_train_max * exp.p.timing):
gap_dur = flexible_values(exp.p.pulse_gap, count, rng) * exp.p.timing
train_dur = np.sum(gap_dur) + total_pulse_dur
# Generate the stimulus strength for each pulse
max_contrast = 1 / np.sqrt(exp.p.stim_gratings)
contrast_dist = "norm", t_info["gen_mean"], t_info["gen_sd"]
log_contrast = flexible_values(contrast_dist, count, rng,
max=np.log10(max_contrast))
# Define the LLR of each pulse
pulse_llr = compute_llr(log_contrast, exp.p.dist_means, exp.p.dist_sds)
# Determine the stimulus position
# TODO this currently hardcodes 2 possible stimulus positions for testing
if t_info["cue_pos"] == 0:
ps = [exp.p.cue_validity, 1 - exp.p.cue_validity]
elif t_info["cue_pos"] == 1:
ps = [1 - exp.p.cue_validity, exp.p.cue_validity]
stim_pos = np.random.choice([0, 1], count, p=ps)
p_info = pd.DataFrame(dict(
# Basic trial information
subject=exp.p.subject,
session=exp.p.session,
run=exp.p.run,
trial=t_info["trial"],
# Pulse information
pulse=np.arange(1, count + 1),
stim_pos=stim_pos,
log_contrast=log_contrast,
contrast=10 ** log_contrast,
pulse_llr=pulse_llr,
pulse_dur=pulse_dur,
gap_dur=gap_dur,
# Achieved performance
occurred=False,
blink=False,
pulse_onset=np.nan,
pulse_offset=np.nan,
dropped_frames=np.nan,
))
return p_info
def compute_llr(c, means, sds):
"""Compute the pulse log-likelihood supporting Target 1."""
# Define the generating distributions
m0, m1 = means
s0, s1 = sds
d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1)
# Compute LLR of each pulse
l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c))
llr = l1 - l0
return llr
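# Note: compute_llr returns log10 p(c | target 1) - log10 p(c | target 0), so a
# positive value is evidence for Target 1; the per-trial evidence logged as
# "trial_llr" in generate_trial_info is simply the sum of these pulse LLRs.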
def run_trial(exp, info):
t_info, p_info = info
# ~~~ Set trial-constant attributes of the stimuli
exp.s.cue.pos = exp.p.stim_pos[t_info.cue_pos]
# ~~~ Inter-trial interval
exp.s.fix.color = exp.p.fix_iti_color
exp.wait_until(exp.iti_end, draw="fix", iti_duration=t_info.wait_iti)
# ~~~ Trial onset
t_info["onset_fix"] = exp.clock.getTime()
exp.s.fix.color = exp.p.fix_ready_color
res = exp.wait_until(AcquireFixation(exp),
timeout=exp.p.wait_fix,
draw="fix")
if res is None:
t_info["result"] = "nofix"
exp.sounds.nofix.play()
return t_info, p_info
for frame in exp.frame_range(seconds=exp.p.wait_start):
exp.check_fixation(allow_blinks=True)
exp.draw("fix")
# ~~~ Pre-stimulus period
exp.s.fix.color = exp.p.fix_trial_color
prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim,
yield_skipped=True)
for frame, skipped in prestim_frames:
if not exp.check_fixation(allow_blinks=True):
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["fixbreak_early"] = True
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
flip_time = exp.draw(["fix", "cue", "targets"])
if not frame:
t_info["onset_targets"] = flip_time
t_info["onset_cue"] = flip_time
t_info["fixbreak_early"] = False
# ~~~ Stimulus period
for p, info in p_info.iterrows():
# Update the pattern
exp.s.pattern.pos = exp.p.stim_pos[info.stim_pos]
exp.s.pattern.contrast = info.contrast
exp.s.pattern.randomize_phases()
# Show each frame of the stimulus
for frame in exp.frame_range(seconds=info.pulse_dur):
if not exp.check_fixation(allow_blinks=True):
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
stims = ["fix", "cue", "targets", "pattern"]
flip_time = exp.draw(stims)
if not frame:
exp.tracker.send_message("pulse_onset")
p_info.loc[p, "occurred"] = True
p_info.loc[p, "pulse_onset"] = flip_time
blink = not exp.tracker.check_eye_open(new_sample=False)
p_info.loc[p, "blink"] |= blink
# This counter is reset at beginning of frame_range
                # so it should count the frames dropped during the stim
p_info.loc[p, "dropped_frames"] = exp.win.nDroppedFrames
gap_frames = exp.frame_range(seconds=info.gap_dur)
for frame in gap_frames:
if not exp.check_fixation(allow_blinks=True):
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
flip_time = exp.draw(["fix", "cue", "targets"])
# Record the time of first flip as the offset of the last pulse
if not frame:
p_info.loc[p, "pulse_offset"] = flip_time
# ~~~ Response period
# Collect the response
now = exp.clock.getTime()
t_info["offset_fix"] = now
t_info["offset_cue"] = now
res = exp.wait_until(AcquireTarget(exp, t_info.target),
timeout=exp.p.wait_resp,
draw="targets")
if res is None:
t_info["result"] = "fixbreak"
else:
t_info.update( | pd.Series(res) | pandas.Series |
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn.preprocessing import LabelEncoder
def load_data():
questionnaire = pd.read_excel('XAutoML.xlsx')
encoder = LabelEncoder()
encoder.classes_ = np.array(['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree'])
for c in questionnaire.columns:
try:
questionnaire.loc[:, c] = questionnaire.loc[:, c].str.strip().str.lower()
questionnaire.loc[:, c] = encoder.transform(questionnaire.loc[:, c])
except (AttributeError, ValueError):
pass
questionnaire.columns = questionnaire.columns.str.strip()
requirements = pd.read_excel('task_results.ods', sheet_name='Requirements', skiprows=1)
requirements = requirements.drop(index=[24], columns=['Unnamed: 1']).T
requirements.columns = requirements.iloc[0]
requirements = requirements[1:]
tasks = | pd.read_excel('task_results.ods', sheet_name=0) | pandas.read_excel |
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Feb 17, 2020
# About: strymread class to read CAN data from CSV file captured using
# libpanda (https://jmscslgroup.github.io/libpanda/) or from `strym` class.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# For System and OS level task
import sys, getopt
## General Data processing and visualization Import
import time
import ntpath
import datetime
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (16,8)
plt.rcParams["image.cmap"] = "Dark2"
# to change default color cycle
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Dark2.colors)
from scipy.interpolate import interp1d
from scipy import signal
import pandas as pd # Note that this is not commai Panda, but Database Pandas
from scipy import integrate
import pickle
import os
from os.path import expanduser
import seaborn as sea
import plotly.express as px
import csv
import copy
import scipy.stats
# cantools import
import cantools
import strym.DBC_Read_Tools as dbc
import pkg_resources
from subprocess import Popen, PIPE
from .utils import configure_logworker
LOGGER = configure_logworker()
dbc_resource = ''
try:
import importlib.resources as pkg_resources
with pkg_resources.path('strym', 'dbc') as rsrc:
dbc_resource = rsrc
except ImportError:
# Try backported to PY<37 `importlib_resources`.
print("Python older than 3.7 detected. ")
try:
import importlib_resources as pkg_resources
with pkg_resources.path('strym', 'dbc') as rsrc:
dbc_resource = rsrc
except ImportError:
print("importlib_resources not found. Install backported importlib_resources through `pip install importlib-resources`")
import vin_parser as vp
# from sqlalchemy import create_engine
import sqlite3
import matplotlib.colors as colors
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
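# Note: truncate_colormap builds a new colormap from the [minval, maxval] slice of an
# existing one; it is used below to derive strymread.sunset from the lower 70% of magma.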
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
import plotly.offline as pyo
# Set notebook mode to work in offline
pyo.init_notebook_mode()
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from .config import config
class strymread:
'''
`strymread` reads the logged CAN data from the given CSV file.
This class provides several utilities functions
Parameters
----------------
csvfile: `str`, `pandas.DataFrame`, default = None
The CSV file to be read. If `pandas.DataFrame` is supplied, then csvfile is set to None
PandasDataFrame, if provided, must have columns ["Time", "Message", "MessageID", "Bus"]
dbcfile: `str`, default = ""
The DBC file which will provide codec for decoding CAN messages
kwargs: variable list of argument in the dictionary format
bus: `list` | default = None
A list of integer correspond to Bus ID.
dbcfolder: `str` | default = None
Specifies a folder path where to look for appropriate dbc if dbcfile='' or dbcfile = None
Appropriate dbc file can be inferred from <brand>_<model>_<year>.dbc
If dbcfolder is None or empty string, then by default, strymread will look for dbc file in the dbc folder of the package where we ship sample dbc file to work with.
verbose: `bool`
Option for verbosity, prints some information when True
createdb: `bool`
If True, creates a sqlite3 database for raw CAN data if the database doesn't exist
dbdir: `str`
Optional argument that specifies where sqlite3 database will be stored.
The default location is `~/.strym/`
Attributes
---------------
dbcfile: `str`, default = ""
The filepath of DBC file
csvfile:`str` | `pandas.DataFrame`
The filepath of CSV Data file, or, raw CAN Message DataFrame
dataframe: `pandas.Dataframe`
Pandas dataframe that stores content of csvfile as dataframe
dataframe_raw: `pandas.Dataframe`
Pandas original dataframe with all bus IDs. When `bus=` is passed to the constructor to filter the dataframe by bus id, the original dataframe is saved
in dataframe_raw
candb: `cantools.db`
CAN database fetched from DBC file
burst: `bool`
A boolean flag that checks if CAN data came in burst. If `True`, then CAN Data was captured in burst, else
`False`. If CAN Data came in burst (as in say 64 messages at a time or so)
then any further analysis might not be reliable. Always check that.
success: `bool`
A boolean flag, if `True`, tells that reading of CSV file was successful.
bus: `list` | default = None
A list of integers corresponding to Bus IDs.
dbcfolder: `str` | default = None
Specifies a folder path where to look for appropriate dbc if `dbcfile=""` or `dbcfile = None`
Appropriate dbc file can be inferred from <brand>_<model>_<year>.dbc
If dbcfolder is None or empty string, then by default, strymread will look for dbc file in package's dbcfolder
where we ship sample dbc file to work with.
dbdir:`str`
Location where the sqlite3 database for the CAN dataframe will be stored.
Default location: `~/.strym/`
database: `str`
The name of the database corresponding to the model/make of the vehicle from which the CAN data
was captured
inferred_dbc: `str`
DBC file inferred from the name of the csvfile passed.
Returns
---------------
`strymread`
Returns an object of type `strymread` upon successful reading, otherwise returns None
Example
----------------
>>> import strym
>>> from strym import strymread
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> dbcfile = 'newToyotacode.dbc'
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymread(csvfile=csvdata, dbcfile=dbcfile)
'''
sunset = truncate_colormap(plt.get_cmap('magma'), 0.0, 0.7) # truncated color map from magma
def __init__(self, csvfile, dbcfile = "", **kwargs):
# the success attribute will be set to True ultimately if everything goes well and the csvfile is read successfully
self.success = False
if csvfile is None:
print("csvfile is None. Unable to proceed with further analysis. See https://jmscslgroup.github.io/strym/api_docs.html#module-strym for further details.")
return
if isinstance(csvfile, pd.DataFrame):
self.dataframe = csvfile
self.csvfile = ''
if (dbcfile is None) or (len(dbcfile) == 0): # check None first so len() is never called on None
print("Please provide a valid dbcfile using argument `dbcfile` to strymread if you intend to supply a dataframe to strymread")
return
elif isinstance(csvfile, str):
# Check if file exists
if not os.path.exists(csvfile):
print("Provided csvfile: {} doesn't exist, or read permission error".format(csvfile))
return
# if file size is less than 60 bytes, return without processing
if os.path.getsize(csvfile) < 60:
print("Nothing significant to read in {}. No further analysis is warranted.".format(csvfile))
return
self.csvfile = csvfile
self.basefile = ntpath.basename(csvfile)
else:
print("Unsupported type for csvfile. Please see https://jmscslgroup.github.io/strym/api_docs.html#module-strym for further details.")
return
# Optional argument for verbosity
self.verbose = kwargs.get("verbose", False)
# Optional argument for bus ID
self.bus = kwargs.get("bus", None)
# Optional argument for dbcfolder where to look for dbc files
self.dbcfolder = kwargs.get("dbcfolder", None)
# Optional argument to tell strymread whether to create a table of the raw CAN data in the db
self.createdb = kwargs.get("createdb", False)
default_db_dir = expanduser("~") + "/.strym/"
# Optional argument for where TIMESERIES DB will be saved
self.dbdir = kwargs.get("dbdir", default_db_dir)
if not os.path.exists(self.dbdir):
if self.verbose:
print("The directory {} for timeseries db doesn't exist, creating one".format(self.dbdir ))
try:
os.mkdir(self.dbdir)
except OSError as error:
print(error)
# If a single bus ID is passed, convert it to a list of one item; if multiple bus IDs
# need to be passed, they must be passed as a list of ints
if isinstance(self.bus, int):
self.bus = [self.bus]
# If data were recorded in burst, the burst attribute will be set to True. In a practical scenario, we won't proceed
# with further analysis when data comes in burst; however, if the csvfile has burst data, no error is raised. It
# is up to the user to check the boolean attribute for True/False
self.burst = False
if len(self.csvfile) > 0:
# All CAN messages will be saved as pandas dataframe
try:
# Get the number of lines using the Unix `wc -l` line-count utility
is_windows = sys.platform.startswith('win')
if not is_windows:
word_counts = Popen(['wc', '-l', self.csvfile], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = word_counts.communicate()
output = output.decode("utf-8")
output = output.strip()
output = output.split(' ')
n_lines = int(output[0])
if n_lines < 5:
print("Not enough data to read in the provided csvfile {}".format(ntpath.basename(self.csvfile)))
return
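# Read all but the last two lines: the tail of a live CAN log may be cut off
# mid-write, so the final rows are treated as potentially incomplete
# (rationale inferred from the nrows/skipfooter usage; not stated in the original).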
self.dataframe = pd.read_csv(self.csvfile,dtype={'Time': np.float64,'Bus':np.uint8, 'MessageID': np.uint32, 'Message': str, 'MessageLength': np.uint16}, nrows=n_lines - 2)
else:
self.dataframe = pd.read_csv(self.csvfile,dtype={'Time': np.float64,'Bus':np.uint8, 'MessageID': np.uint32, 'Message': str, 'MessageLength': np.uint16}, skipfooter=2)
except pd.errors.ParserError:
print("Ill-formated CSV File. A properly formatted CAN-data CSV file must have at least following columns: ['Time', 'Bus', 'MessageID', 'Message']")
print("No data was written the csvfile. Unable to perform further operation")
return
except UnicodeDecodeError:
print("Ill-formated CSV File. A properly formatted CAN-data CSV file must have at least following columns: ['Time', 'Bus', 'MessageID', 'Message']")
print("No data was written to the csvfile. Unable to perform further operation")
return
except pd.errors.EmptyDataError:
print("CSVfile is empty.")
return
if self.dataframe.shape[0] == 0:
print("No data was present in the csvfile or pandas dataframe supplied is empty. Unable to perform further operation")
return
self.dataframe = self.dataframe.dropna()
if set(['Time', 'MessageID', 'Message', 'Bus']).issubset(self.dataframe.columns) == False:
print("Ill-formated CSV File or pandas dataframe. A properly formatted CAN-data CSV file/dataframe must have at least following columns: ['Time', 'Bus', 'MessageID', 'Message']")
print("Unable to perform further operation")
return
if np.any(np.diff(self.dataframe['Time'].values) < 0.0):
print("Warning: Timestamps are not monotonically increasing. Further analysis is not recommended.")
return
def vin(csvfile):
"""
Returns the vehicle identification number (VIN), if detected, from the filename.
Uses a very simple method: looking for a 17-character string near the end of the filename.
Parameters
--------------
csvfile: `str`
Parse VIN number from the name of the `csvfile`
"""
# we use underscores to split up the filename
splits = csvfile.split('_')
candidates = []
# print(f'The splits of the file are {splits}')
for split in splits:
# all VIN are 17 chars long
if len(split) == 17:
# keep them in an array, in case the path has other 17 char elements
candidates.append(split)
if len(candidates) >= 1:
# return the end element, as none of our fileendings has 17 char elements at this time
# HACK: if folks create _some17charfileending.csv then this will fail
return candidates[-1]
else:
return 'VIN not part of filename'
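# Illustrative behaviour of the VIN lookup above (filenames are made up):
# vin('2020-03-20_1HGBH41JXMN109186_CAN_Messages.csv') -> '1HGBH41JXMN109186'
# vin('2020-03-20.csv') -> 'VIN not part of filename'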
vin = vin(self.csvfile)
brand = "toyota"
model = "rav4"
year = "2019"
try:
if vp.check_valid(vin) == True:
brand = vp.manuf(vin)
brand = brand.split(" ")[0].lower()
try:
model = vp.online_parse(vin)['Model'].lower()
except ConnectionError as e:
print("Retrieving model of the vehicle requires internet connection. Check your connection.")
return
year = str(vp.year(vin))
LOGGER.info("Vehicle model infered is {}-{}-{}".format(brand, model, year))
except:
if self.verbose:
print('No valid vin... Continuing as Toyota RAV4. If this is inaccurate, please append VIN number to csvfile prefixed with an underscore.')
self.inferred_dbc = "{}_{}_{}.dbc".format(brand, model, year)
if (dbcfile is None) or(dbcfile==""):
dbcfile = str(dbc_resource) + "/" + self.inferred_dbc
if not os.path.exists(dbcfile):
print("The dbcfile: {} doesn't exist, or read permission error".format(dbcfile))
return
# if control comes to this point, then the reading of the CSV file was successful
self.success = True
self.dataframe = self.timeindex(self.dataframe, inplace=True)
self.dataframe_raw = None
if self.bus is not None:
if not np.all(np.isin(self.bus, self.dataframe['Bus'].unique())):
print("One of the bus id not available.")
print("Available BUS IDs are {}".format(self.dataframe['Bus'].unique()))
self.success = False
return
else:
self.dataframe_raw = self.dataframe.copy(deep = True)
self.dataframe = self.dataframe[self.dataframe['Bus'].isin(self.bus)]
# Check if data came in burst
T = self.dataframe['Time'].diff()
T_head = T[1:64]
if np.mean(T_head) == 0.0:
self.burst = True
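# Rationale (not stated in the original code): T_head holds the first 63
# inter-arrival time differences; if their mean is exactly zero the messages
# carry identical timestamps, which is what a burst capture looks like.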
# DBC file that has CAN message codec
self.dbcfile = dbcfile
# save the CAN database for later use
if self.dbcfile:
self.candb = cantools.db.load_file(self.dbcfile)
else:
self.candb = None
# initialize the dbc lookups for any particular usage
# this creates the dict later used to figure out which signals/msgs to
# use when decoding these data
self._dbc_init_dict()
# We will create an SQLite DB based on VIN number
self.database = brand.upper() + '_' + model.upper() + '_' + year.upper() + ".db"
self.raw_table = "RAW_CAN"
self.db_location = '{}{}'.format(self.dbdir, self.database)
if self.createdb:
dbconnection = self.dbconnect(self.db_location)
cursor = dbconnection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS {} (Clock TIMESTAMP, Time REAL NOT NULL, Bus INTEGER, MessageID INTEGER, Message TEXT, MessageLength INTEGER, PRIMARY KEY (Clock, Bus, MessageID, Message));'.format(self.raw_table))
dbconnection.commit()
try:
self.dataframe[['Time', 'Bus', 'MessageID', 'Message', 'MessageLength']].to_sql(self.raw_table, con=dbconnection, index=True, if_exists='append')
except sqlite3.IntegrityError as e:
print(e)
if self.verbose:
print("Attempted to insert duplicate entries to the RAW_CAN table.\nRAW_CAN table has (Clock, Bus, MessageID, Message) composite primary key.")
def dbconnect(self, db_location):
"""
Creates dbconnection and returns db connection object
Parameters
------------
db_location: `str`
Path to the sqlite database file
"""
dbconnection = None
try:
dbconnection = sqlite3.connect(db_location)
except sqlite3.Error as e:
print(e)
# dbengine = create_engine(db_location, echo = self.verbose )
# dbengine.connect()
# dbconnection = self.dbengine.raw_connection()
return dbconnection
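# A minimal usage sketch for dbconnect (the path below is assumed, built from the
# default dbdir and the inferred database name; not taken from the original docs):
# conn = r0.dbconnect(expanduser("~") + "/.strym/TOYOTA_RAV4_2019.db")
# conn.execute("SELECT COUNT(*) FROM RAW_CAN").fetchall()
# conn.close()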
def _set_dbc(self):
'''
`_set_dbc` sets the DBC file
'''
self.dbcfile = input('DBC file unspecified. Enter the filepath of the DBC file: ')
if self.dbcfile:
try:
self.dbcfile = str(self.dbcfile)
print("The new DBC file entered is: {}".format(self.dbcfile))
except ValueError:
print('DBC file entered is not a string')
raise
self.candb = cantools.db.load_file(self.dbcfile)
def get_ts(self, msg, signal, verbose=False):
'''
`get_ts` returns time-series data for the given message `msg` and signal `signal`
Parameters
-------------
msg: `string` | `int`
A valid message that can be found in the given DBC file. Can be specified as message name or message ID
signal: `string` | `int`
A valid signal corresponding to `msg` that can be found in the given DBC file. Can be specified as signal name or signal ID
verbose: `bool`, default = False
If True, print some information
'''
if not self.dbcfile:
self._set_dbc()
assert(isinstance(msg, int) or isinstance(msg, str)), ("Only Integer message ID or string name is supported for msg_name")
assert(isinstance(signal, int) or isinstance(signal, str)), ("Only Integer signal ID or string name is supported for signal_name")
if isinstance(msg, int):
msg = dbc.getMessageName(msg, self.candb)
if verbose:
print("Message Name: {}".format(msg))
if isinstance(signal, int):
signal = dbc.getSignalName(msg, signal, self.candb)
if verbose:
print("Signal Name: {}\n".format(signal))
# the try/except below is a fix for the hybrid RAV4: in data from a hybrid,
# the accel message length is 4 bytes vs. 8 in the internal-combustion-engine version
ts = pd.DataFrame(columns = ["Time", "Message"])
try:
ts = dbc.convertData(msg, signal, self.dataframe, self.candb)
except ValueError as e:
if (isinstance(msg, int) and msg == 552) or (isinstance(msg, str) and msg == 'ACCELEROMETER'):
if 'Short' in str(e):
LOGGER.info('Found RAV4 where acceleration messages are 4 bytes.')
# accel_def = self.candb.get_message_by_name('ACCELEROMETER')
# index_of_acceldef = 0
# for i, m in enumerate(self.candb.messages):
# if m == accel_def:
# index_of_acceldef = i
# break
# accel_def.length = 4
# self.candb.messages[index_of_acceldef] = accel_def
self.dataframe = dbc.CleanData(self.dataframe,address=552)
ts = dbc.convertData(msg, signal, self.dataframe, self.candb)
return ts
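# A hedged usage sketch for get_ts. The message name/ID below are taken from the
# ACCELEROMETER special case handled above; the signal index 0 is an assumption
# and depends on the DBC in use:
# accel = r0.get_ts(msg='ACCELEROMETER', signal=0)
# accel = r0.get_ts(msg=552, signal=0)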
def messageIDs(self):
'''
Retrieves the list of all message IDs available in the given CSV-formatted CAN data file.
Returns
---------
`list`
A python list of all available message IDs in the given CSV-formatted CAN data file.
'''
msgIDs = self.dataframe['MessageID'].unique()
msgIDs.sort()
return msgIDs
def count(self, plot = False):
'''
A utility function to return and optionally plot the counts for each Message ID as a bar graph
Returns
----------
`pandas.DataFrame`
A pandas DataFrame with total message counts per Message ID and total count by Bus
Example
---------
>>> import strym
>>> from strym import strymread
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> dbcfile = 'newToyotacode.dbc'
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymread(csvfile=csvdata, dbcfile=dbcfile)
>>> r0.count()
'''
dataframe = self.dataframe
if plot:
r1 = dataframe[dataframe['MessageID'] <=200]
r2 = dataframe[(dataframe['MessageID'] >200) & (dataframe['MessageID'] <= 400)]
r3 = dataframe[(dataframe['MessageID'] >400) & (dataframe['MessageID'] <= 600)]
r4 = dataframe[(dataframe['MessageID'] >600) & (dataframe['MessageID'] <= 800)]
r5 = dataframe[(dataframe['MessageID'] >800) & (dataframe['MessageID'] <= 1000)]
r6 = dataframe[(dataframe['MessageID'] >1000) & (dataframe['MessageID'] <= 1200)]
r7 = dataframe[(dataframe['MessageID'] >1200) & (dataframe['MessageID'] <= 1400)]
r8 = dataframe[(dataframe['MessageID'] >1400) ]
r_df = [r1, r2, r3, r4, r5, r6, r7, r8]
self._setplots(ncols=2, nrows=4)
fig, axes = self.create_fig(ncols=2, nrows=4)
plt.rcParams['figure.figsize'] = (16, 8)
fig.tight_layout(pad=5.0)
ax = axes.ravel()
for i in range(0, 8):
cnt = r_df[i]['MessageID'].value_counts()
cnt = cnt.sort_index(ascending=True)
if cnt.empty:
continue
cnt.plot(kind='bar', ax=ax[i])
ax[i].tick_params(axis="x")
ax[i].tick_params(axis="y")
fig.suptitle("Message ID counts: "+ ntpath.basename(self.csvfile), y=0.98)
fig.show()
bus = dataframe['Bus'].unique()
bus.sort()
columns = ['Counts_Bus_' + str(int(s)) for s in bus]
columns.insert(0, 'MessageID')
all_msgs = self.messageIDs()
dfx = pd.DataFrame(columns=columns)
dfx['MessageID'] = all_msgs
dfx.index = dfx['MessageID'].values
countbybus = dataframe.groupby(['MessageID', 'Bus'])
for key,item in countbybus:
a_group = countbybus.get_group(key)
dfx.at[key[0], 'Counts_Bus_{}'.format(int(key[1]))] = a_group.shape[0]
dfx.fillna(0, inplace=True)
dfx['TotalCount'] = 0
for b in bus:
dfx['TotalCount'] = dfx['TotalCount'] + dfx['Counts_Bus_{}'.format(int(b))]
return dfx
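# Illustration of the frame returned by count() for an assumed two-bus log
# (numbers are made up): columns MessageID, Counts_Bus_0, Counts_Bus_1 and
# TotalCount, indexed by MessageID, e.g. MessageID 36 -> 1049 + 1049 = 2098.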
def start_time(self):
'''
`start_time` retrieves the human-readable time when logging of the data started
Returns
---------
`str`
Human-readable string-formatted time.
'''
return time.ctime(self.dataframe["Time"].iloc[0])
def end_time(self):
'''
`end_time` retrieves the human-readable time when logging of the data was stopped.
Returns
---------
`str`
Human-readable string-formatted time.
'''
return time.ctime(self.dataframe["Time"].iloc[-1])
def triptime(self):
'''
`triptime` retrieves total duration of the recording for given CSV-formatted log file in seconds.
Returns
---------
`double`
Duration in seconds.
'''
duration = self.dataframe["Time"].iloc[-1] - self.dataframe["Time"].iloc[0]
return duration
def triplength(self, time=-1):
'''
`triplength` returns total distance travelled while logging CAN data.
Alternatively, one can provide a second argument `time` to query how much distance was traveled in, say, 50 seconds from the start.
Parameters
-----------
time: `double`
Provide a valid elapsed time in seconds to query how much distance was traveled `time` seconds since the logging of data was started.
'''
# first convert speed in km/h to m/s
speed = self.speed()
speed_in_ms = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import argparse
import pandas as pd
import re
#read arguments
parser = argparse.ArgumentParser(description="Recluster the gene clusters by species pairs based on orthopairs")
parser.add_argument("--orthopairs", "-op", required=True)
parser.add_argument("--orthogroups", "-og", required=True)
parser.add_argument("--species1", "-s1", required=True)
parser.add_argument("--species2", "-s2", required=True)
parser.add_argument("--output_file", "-out", required=True)
args = parser.parse_args()
my_orthopairs = args.orthopairs
my_orthogroup = args.orthogroups
species1 = args.species1
species2 = args.species2
my_output = args.output_file
#### Main
#read input
orthopairs_df = pd.read_table(my_orthopairs, sep="\t", header=0, names=["GeneID1", "GeneID2"])
orthopairs_df["GeneID1"] = [re.sub(".*\\|", "", element) for element in list(orthopairs_df["GeneID1"])]
orthopairs_df["GeneID2"] = [re.sub(".*\\|", "", element) for element in list(orthopairs_df["GeneID2"])]
orthogroups_df = pd.read_table(my_orthogroup, sep="\t", header=0, names=["ClusterID", "Species", "GeneID", ""], index_col=None)
#add Species to orthopairs
geneID_species_dict = | pd.Series(orthogroups_df.Species.values, index=orthogroups_df.GeneID) | pandas.Series |
from sklearn.linear_model import LogisticRegression
import argparse
import os
import numpy as np
from sklearn.metrics import mean_squared_error
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
from azureml.core.run import Run
from azureml.data.dataset_factory import TabularDatasetFactory
# TODO: Create TabularDataset using TabularDatasetFactory
# Data is located at:
# "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv"
### YOUR CODE HERE ###
run = Run.get_context()
def clean_data(data):
# Dict for cleaning data
months = {"jan":1, "feb":2, "mar":3, "apr":4, "may":5, "jun":6, "jul":7, "aug":8, "sep":9, "oct":10, "nov":11, "dec":12}
weekdays = {"mon":1, "tue":2, "wed":3, "thu":4, "fri":5, "sat":6, "sun":7}
# Clean and one hot encode data
x_df = data.to_pandas_dataframe().dropna()
jobs = pd.get_dummies(x_df.job, prefix="job")
x_df.drop("job", inplace=True, axis=1)
x_df = x_df.join(jobs)
x_df["marital"] = x_df.marital.apply(lambda s: 1 if s == "married" else 0)
x_df["default"] = x_df.default.apply(lambda s: 1 if s == "yes" else 0)
x_df["housing"] = x_df.housing.apply(lambda s: 1 if s == "yes" else 0)
x_df["loan"] = x_df.loan.apply(lambda s: 1 if s == "yes" else 0)
contact = pd.get_dummies(x_df.contact, prefix="contact")
x_df.drop("contact", inplace=True, axis=1)
x_df = x_df.join(contact)
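# Pattern used for the categorical columns in this function (illustration, values
# assumed): pd.get_dummies expands e.g. contact == "cellular" into a
# contact_cellular indicator column; the original column is dropped and the
# indicators are joined back onto x_df.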
education = | pd.get_dummies(x_df.education, prefix="education") | pandas.get_dummies |
import argparse
import os
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from molgym.tools.analysis import parse_json_lines_file, parse_results_filename, collect_results_paths
# Styling
fig_width = 3.3
fig_height = 2.1
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 6})
colors = [
'#1f77b4', # muted blue
'#d62728', # brick red
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf', # blue-teal
]
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Analyse MolGym Output')
parser.add_argument('--dir', help='path to results directory (repeatable)', required=True, action='append')
parser.add_argument('--baseline', help='baseline (repeatable)', required=False, action='append')
parser.add_argument('--max_num_steps', help='plot up to maximum number of steps', required=False, type=int)
parser.add_argument('--mode',
help='train or eval mode',
required=False,
type=str,
choices=['train', 'eval'],
default='eval')
parser.add_argument('--output', help='filename of output file', required=True)
return parser.parse_args()
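# A typical invocation of this script might look like the following (the script
# name and paths are placeholders; --dir and --output are the required flags):
# python plot_results.py --dir results/ --mode eval --output learning_curves.pdf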
def get_data(directories: List[str], mode: str) -> pd.DataFrame:
paths = []
for directory in directories:
paths += collect_results_paths(directory=directory, mode=mode)
assert len(paths) > 0
frames = []
for path in paths:
data = parse_json_lines_file(path)
df = pd.DataFrame(data)
info = parse_results_filename(os.path.basename(path))
df['seed'] = info['seed']
df['name'] = info['name']
df['mode'] = info['mode']
frames.append(df)
data = | pd.concat(frames) | pandas.concat |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
class BaseVizTestCase(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
"url_params": {},
"row_limit": 500,
"metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"secondary_metric": "sum__SP_POP_TOTL",
"granularity_sqla": "year",
"page_length": 0,
"all_columns": [],
"viz_type": "table",
"since": "2014-01-01",
"until": "2014-01-02",
"metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
"country_fieldtype": "cca3",
"percent_metrics": ["count"],
"slice_id": 74,
"time_grain_sqla": None,
"order_by_cols": [],
"groupby": ["country_name"],
"compare_lag": "10",
"limit": "25",
"datasource": "2__table",
"table_timestamp_format": "%Y-%m-%d %H:%M:%S",
"markup_type": "markdown",
"where": "",
"compare_suffix": "o10Y",
}
datasource = Mock()
datasource.type = "table"
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [
u"sum__SP_POP_TOTL",
u"SUM(SE_PRM_NENR_MA)",
u"SUM(SP_URB_TOTL)",
u"count",
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_df_returns_empty_df(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = "epoch_ms"
result = test_viz.get_df(query_obj)
import logging
logger.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
mock_dttm_col.python_date_format = "%Y-%m-%d"
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
)
def test_cache_timeout(self):
datasource = self.get_datasource_mock()
datasource.cache_timeout = 0
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(0, test_viz.cache_timeout)
datasource.cache_timeout = 156
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(156, test_viz.cache_timeout)
datasource.cache_timeout = None
datasource.database.cache_timeout = 0
self.assertEqual(0, test_viz.cache_timeout)
datasource.database.cache_timeout = 1666
self.assertEqual(1666, test_viz.cache_timeout)
datasource.database.cache_timeout = None
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
def test_get_data_applies_percentage(self):
form_data = {
"groupby": ["groupA", "groupB"],
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"count",
"avg__C",
],
"percent_metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"avg__B",
],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"SUM(value1)": [15, 20, 25, 40],
"avg__B": [10, 20, 5, 15],
"avg__C": [11, 22, 33, 44],
"count": [6, 7, 8, 9],
"groupA": ["A", "B", "C", "C"],
"groupB": ["x", "x", "y", "z"],
}
)
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data and computes percents
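# (Each percent column is the metric divided by its column total, e.g.
# %SUM(value1) for the first row is 15 / (15 + 20 + 25 + 40) = 0.15.)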
self.assertEqual(
[
"groupA",
"groupB",
"SUM(value1)",
"count",
"avg__C",
"%SUM(value1)",
"%avg__B",
],
list(data["columns"]),
)
expected = [
{
"groupA": "A",
"groupB": "x",
"SUM(value1)": 15,
"count": 6,
"avg__C": 11,
"%SUM(value1)": 0.15,
"%avg__B": 0.2,
},
{
"groupA": "B",
"groupB": "x",
"SUM(value1)": 20,
"count": 7,
"avg__C": 22,
"%SUM(value1)": 0.2,
"%avg__B": 0.4,
},
{
"groupA": "C",
"groupB": "y",
"SUM(value1)": 25,
"count": 8,
"avg__C": 33,
"%SUM(value1)": 0.25,
"%avg__B": 0.1,
},
{
"groupA": "C",
"groupB": "z",
"SUM(value1)": 40,
"count": 9,
"avg__C": 44,
"%SUM(value1)": 0.4,
"%avg__B": 0.3,
},
]
self.assertEqual(expected, data["records"])
def test_parse_adhoc_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SIMPLE",
"clause": "HAVING",
"subject": "SUM(value1)",
"operator": "<",
"comparator": "10",
},
{
"expressionType": "SQL",
"clause": "HAVING",
"sqlExpression": "SUM(value1) > 5",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual(
[{"op": "<", "val": "10", "col": "SUM(value1)"}],
query_obj["extras"]["having_druid"],
)
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])
def test_adhoc_filters_overwrite_legacy_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
"having": "SUM(value1) > 5",
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual([], query_obj["extras"]["having_druid"])
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("", query_obj["extras"]["having"])
def test_query_obj_merges_percent_metrics(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["sum__A", "count", "avg__C"],
"percent_metrics": ["sum__A", "avg__B", "max__Y"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
["sum__A", "count", "avg__C", "avg__B", "max__Y"], query_obj["metrics"]
)
def test_query_obj_throws_columns_and_metrics(self):
datasource = self.get_datasource_mock()
form_data = {"all_columns": ["A", "B"], "metrics": ["x", "y"]}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
del form_data["metrics"]
form_data["groupby"] = ["B", "C"]
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_merges_all_columns(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
"all_columns": ["colA", "colB", "colC"],
"order_by_cols": ['["colA", "colB"]', '["colC"]'],
}
super_query_obj.return_value = {
"columns": ["colD", "colC"],
"groupby": ["colA", "colB"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(form_data["all_columns"], query_obj["columns"])
self.assertEqual([], query_obj["groupby"])
self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])
def test_query_obj_uses_sortby(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["colA", "colB"],
"order_desc": False,
}
def run_test(metric):
form_data["timeseries_limit_metric"] = metric
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
self.assertEqual([(metric, True)], query_obj["orderby"])
run_test("simple_metric")
run_test(
{
"label": "adhoc_metric",
"expressionType": "SIMPLE",
"aggregate": "SUM",
"column": {"column_name": "sort_column",},
}
)
def test_should_be_timeseries_raises_when_no_granularity(self):
datasource = self.get_datasource_mock()
form_data = {"include_time": True}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.should_be_timeseries()
def test_adhoc_metric_with_sortby(self):
metrics = [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "sum_value",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
]
form_data = {
"metrics": metrics,
"timeseries_limit_metric": {
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"order_desc": False,
}
df = pd.DataFrame({"SUM(value1)": [15], "sum_value": [15]})
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
self.assertEqual(["sum_value"], data["columns"])
class DistBarVizTestCase(SupersetTestCase):
def test_groupby_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "anchovies", None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("votes", data["key"])
expected_values = [
{"x": "pepperoni", "y": 5},
{"x": "cheese", "y": 3},
{"x": NULL_STRING, "y": 2},
{"x": "anchovies", "y": 1},
]
self.assertEqual(expected_values, data["values"])
def test_groupby_nans(self):
form_data = {
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["beds"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("count", data["key"])
expected_values = [
{"x": "1.0", "y": 42},
{"x": "0.0", "y": 30},
{"x": "2.0", "y": 29},
{"x": NULL_STRING, "y": 3},
]
self.assertEqual(expected_values, data["values"])
def test_column_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": ["role"],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
"role": ["engineer", "engineer", None, None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)
expected = [
{
"key": NULL_STRING,
"values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
},
{
"key": "engineer",
"values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
},
]
self.assertEqual(expected, data)
class PairedTTestTestCase(SupersetTestCase):
def test_get_data_transforms_dataframe(self):
form_data = {
"groupby": ["groupA", "groupB", "groupC"],
"metrics": ["metric1", "metric2", "metric3"],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"metric1": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 4},
{"x": 200, "y": 5},
{"x": 300, "y": 6},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 7},
{"x": 200, "y": 8},
{"x": 300, "y": 9},
],
"group": ("c1", "c2", "c3"),
},
],
"metric2": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 40},
{"x": 200, "y": 50},
{"x": 300, "y": 60},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 70},
{"x": 200, "y": 80},
{"x": 300, "y": 90},
],
"group": ("c1", "c2", "c3"),
},
],
"metric3": [
{
"values": [
{"x": 100, "y": 100},
{"x": 200, "y": 200},
{"x": 300, "y": 300},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 400},
{"x": 200, "y": 500},
{"x": 300, "y": 600},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 700},
{"x": 200, "y": 800},
{"x": 300, "y": 900},
],
"group": ("c1", "c2", "c3"),
},
],
}
self.assertEqual(data, expected)
def test_get_data_empty_null_keys(self):
form_data = {"groupby": [], "metrics": ["", None]}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300]
raw[""] = [1, 2, 3]
raw[None] = [10, 20, 30]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"N/A": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": "All",
}
],
"NULL": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": "All",
}
],
}
self.assertEqual(data, expected)
class PartitionVizTestCase(SupersetTestCase):
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_time_series_option(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {}
test_viz = viz.PartitionViz(datasource, form_data)
super_query_obj.return_value = {}
query_obj = test_viz.query_obj()
self.assertFalse(query_obj["is_timeseries"])
test_viz.form_data["time_series_option"] = "agg_sum"
query_obj = test_viz.query_obj()
self.assertTrue(query_obj["is_timeseries"])
def test_levels_for_computes_levels(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
time_op = "agg_sum"
test_viz = viz.PartitionViz(Mock(), {})
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
"metric1": {"a1": 6, "b1": 15, "c1": 24},
"metric2": {"a1": 60, "b1": 150, "c1": 240},
"metric3": {"a1": 600, "b1": 1500, "c1": 2400},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
time_op = "agg_mean"
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 200.0,
"metric1": 5.0,
"metric2": 50.0,
"metric3": 500.0,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
"metric1": {"a1": 2, "b1": 5, "c1": 8},
"metric2": {"a1": 20, "b1": 50, "c1": 80},
"metric3": {"a1": 200, "b1": 500, "c1": 800},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_diff_computes_difference(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {})
time_op = "point_diff"
levels = test_viz.levels_for_diff(time_op, groups, df)
expected = {"metric1": 6, "metric2": 60, "metric3": 600}
self.assertEqual(expected, levels[0].to_dict())
expected = {
"metric1": {"a1": 2, "b1": 2, "c1": 2},
"metric2": {"a1": 20, "b1": 20, "c1": 20},
"metric3": {"a1": 200, "b1": 200, "c1": 200},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(4, len(levels))
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_time_calls_process_data_and_drops_cols(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {"groupby": groups})
def return_args(df_drop, aggregate):
return df_drop
test_viz.process_data = Mock(side_effect=return_args)
levels = test_viz.levels_for_time(groups, df)
self.assertEqual(4, len(levels))
cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
cols += ["groupA"]
self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
cols += ["groupB"]
self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
cols += ["groupC"]
self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
self.assertEqual(4, len(test_viz.process_data.mock_calls))
def test_nest_values_returns_hierarchy(self):
raw = {}
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
levels = test_viz.levels_for("agg_sum", groups, df)
nest = test_viz.nest_values(levels)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
def test_nest_procs_returns_hierarchy(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
metrics = ["metric1", "metric2", "metric3"]
procs = {}
for i in range(0, 4):
df_drop = df.drop(groups[i:], 1)
pivot = df_drop.pivot_table(
index=DTTM_ALIAS, columns=groups[:i], values=metrics
)
procs[i] = pivot
nest = test_viz.nest_procs(procs)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(None, nest[i].get("val"))
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(3, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
self.assertEqual(
1, len(nest[0]["children"][0]["children"][0]["children"][0]["children"])
)
def test_get_data_calls_correct_method(self):
test_viz = viz.PartitionViz(Mock(), {})
df = Mock()
with self.assertRaises(ValueError):
test_viz.get_data(df)
test_viz.levels_for = Mock(return_value=1)
test_viz.nest_values = Mock(return_value=1)
test_viz.form_data["groupby"] = ["groups"]
test_viz.form_data["time_series_option"] = "not_time"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "agg_sum"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "agg_mean"
test_viz.get_data(df)
self.assertEqual("agg_mean", test_viz.levels_for.mock_calls[2][1][0])
test_viz.form_data["time_series_option"] = "point_diff"
test_viz.levels_for_diff = Mock(return_value=1)
test_viz.get_data(df)
self.assertEqual("point_diff", test_viz.levels_for_diff.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "point_percent"
test_viz.get_data(df)
self.assertEqual("point_percent", test_viz.levels_for_diff.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "point_factor"
test_viz.get_data(df)
self.assertEqual("point_factor", test_viz.levels_for_diff.mock_calls[2][1][0])
test_viz.levels_for_time = Mock(return_value=1)
test_viz.nest_procs = Mock(return_value=1)
test_viz.form_data["time_series_option"] = "adv_anal"
test_viz.get_data(df)
self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
test_viz.form_data["time_series_option"] = "time_series"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[3][1][0])
self.assertEqual(7, len(test_viz.nest_values.mock_calls))
class RoseVisTestCase(SupersetTestCase):
def test_rose_vis_get_data(self):
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
t3 = pd.Timestamp("2004")
raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df = pd.DataFrame(raw)
fd = {"metrics": ["metric1"], "groupby": ["groupA"]}
test_viz = viz.RoseViz(Mock(), fd)
test_viz.metrics = fd["metrics"]
res = test_viz.get_data(df)
expected = {
946684800000000000: [
{"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
{"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
{"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
],
1009843200000000000: [
{"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
{"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
{"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
],
1072915200000000000: [
{"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
{"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
{"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
],
}
self.assertEqual(expected, res)
class TimeSeriesTableVizTestCase(SupersetTestCase):
def test_get_data_metrics(self):
form_data = {"metrics": ["sum__A", "count"], "groupby": []}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t2]
raw["sum__A"] = [15, 20]
raw["count"] = [6, 7]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["count", "sum__A"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"sum__A": 15, "count": 6},
t2.strftime(time_format): {"sum__A": 20, "count": 7},
}
self.assertEqual(expected, data["records"])
def test_get_data_group_by(self):
form_data = {"metrics": ["sum__A"], "groupby": ["groupby1"]}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
raw["sum__A"] = [15, 20, 25, 30, 35, 40]
raw["groupby1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
df = | pd.DataFrame(raw) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
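# Illustration (not in the original): for a 2x3 frame with values=True this yields
# the positional pairs (0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2).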
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
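# Illustration (not in the original): for a 2-D frame, _axify(df, [0, 1], 1)
# returns (slice(None, None, None), [0, 1]), i.e. "all rows, columns 0 and 1".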
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
        tm.assert_frame_equal(df, expected)  # api: pandas.util.testing.assert_frame_equal
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
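# Illustrative sketch (not one of the original tests): the helper above takes a
# groupby result computed with observed=True and reindexes it onto the full
# cartesian product of category levels. The frame and values here are made up.
def _example_cartesian_product_for_groupers():
    a = Categorical(["x", "x", "y"], categories=["x", "y"])
    b = Categorical(["p", "q", "p"], categories=["p", "q"])
    df = DataFrame({"A": a, "B": b, "val": [1, 2, 3]})
    observed = df.groupby(["A", "B"], observed=True).sum()
    # expand to the full 2x2 grid of (A, B) categories, filling the missing cell with 0
    return cartesian_product_for_groupers(observed, [a, b], ["A", "B"], fill_value=0)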
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
    expected.index = Index([1, 2], name="person_id")  # api: pandas.Index
import argparse
import logging
from decimal import getcontext, Decimal, ROUND_UP
from pathlib import Path
from typing import Dict, Set
from junitparser import JUnitXml
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
EWM_ALPHA = 0.1
EWM_ADJUST = False
HEATMAP_FIGSIZE = (100, 50)
def parse_input_files(junit_files: str, test_history_csv: str):
if junit_files:
df = parse_junit_to_df(Path(junit_files))
else:
df = pd.read_csv(
test_history_csv,
index_col="timestamp",
parse_dates=["timestamp"],
)
return df.sort_index()
def calc_fliprate(testruns: pd.Series) -> float:
"""Calculate test result fliprate from given test results series"""
if len(testruns) < 2:
return 0.0
first = True
previous = None
flips = 0
possible_flips = len(testruns) - 1
for _, val in testruns.iteritems():
if first:
first = False
previous = val
continue
if val != previous:
flips += 1
previous = val
return flips / possible_flips
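# Quick illustration with made-up results (not part of the original script): the
# sequence pass, fail, fail, pass flips twice over three transitions, so the
# fliprate is 2/3.
def _example_calc_fliprate():
    runs = pd.Series(["pass", "fail", "fail", "pass"])
    return calc_fliprate(runs)  # -> 0.666...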
def non_overlapping_window_fliprate(testruns: pd.Series, window_size: int, window_count: int) -> pd.Series:
"""Reverse given testruns to latest first and calculate flip rate for non-overlapping run windows"""
testruns_reversed = testruns.iloc[::-1]
fliprate_groups = (
testruns_reversed.groupby(np.arange(len(testruns_reversed)) // window_size)
.apply(calc_fliprate)
.iloc[:window_count]
)
return fliprate_groups.rename(lambda x: window_count - x).sort_index()
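# Sketch of the windowing behaviour on hypothetical data: with window_size=3 and
# window_count=2 the six most recent runs are split into two non-overlapping
# windows of three, each scored with calc_fliprate; the most recent window gets
# the highest index in the returned Series.
def _example_window_fliprate():
    runs = pd.Series(["pass", "fail", "pass", "pass", "pass", "fail"])
    return non_overlapping_window_fliprate(runs, window_size=3, window_count=2)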
def calculate_n_days_fliprate_table(testrun_table: pd.DataFrame, days: int, window_count: int) -> pd.DataFrame:
"""Select given history amount and calculate fliprates for given n day windows.
Return a table containing the results.
"""
    data = testrun_table[testrun_table.index >= (testrun_table.index.max() - pd.Timedelta(days=days * window_count))]  # api: pandas.Timedelta
from scipy import stats
import numpy as np
import pandas as pd
from itertools import combinations
from sklearn.metrics import precision_score,recall_score,accuracy_score,roc_auc_score,f1_score,roc_curve,precision_recall_curve
from static_data import *
import pickle
def normalize(arr):
return arr
# return arr/arr.sum()
def get_consistency_measures(abund_arr,K):
condition_column_split_index = abund_arr.shape[1]//2
arr_lst = []
for abund_arr in [abund_arr[:,:condition_column_split_index],abund_arr[:,condition_column_split_index:]]:
for i, j in combinations(range(abund_arr.shape[1]), 2):
arr_lst.append((((abund_arr[:,i]<=K) & (abund_arr[:,j]<=K)) | ((abund_arr[:,i]>K) & (abund_arr[:,j]>K))))
return np.vstack(arr_lst).mean()
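# Hedged illustration (made-up abundances): for a 2-isoform matrix with two
# replicates per condition, the measure is the fraction of within-condition
# replicate pairs that land on the same side of the threshold K. Here every
# pair agrees, so the result is 1.0.
def _example_consistency_measures():
    abund = np.array([[0.0, 0.5, 3.0, 4.0],
                      [2.0, 2.5, 0.2, 0.3]])
    return get_consistency_measures(abund, K=1)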
def get_resolution_entropy(abund_arr,I):
df = pd.DataFrame({'abund':abund_arr}).dropna()
df['group_range'] = pd.cut(df['abund'],I)
P = df.groupby('group_range').count()['abund'].values / abund_arr.shape[0]
P = P[P != 0]
RE = -np.median(np.log(P)*P)
return RE
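# Rough sketch (synthetic estimates): evenly spread abundances occupy many of the
# I bins and get a higher resolution entropy than estimates concentrated in a
# single bin, whose entropy collapses to zero.
def _example_resolution_entropy():
    spread = np.linspace(0.0, 10.0, 1000)
    concentrated = np.full(1000, 5.0)
    return get_resolution_entropy(spread, 100), get_resolution_entropy(concentrated, 100)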
def get_single_sample_metric(metric,ground_truth, estimated,df=None):
if metric == 'spearmanr':
if (ground_truth.shape[0] == 1):
return 1 if ground_truth.values[0] == estimated.values[0] else 0
else:
return stats.spearmanr(ground_truth, estimated).correlation
elif metric == 'nrmse':
if (np.std(ground_truth, axis=0) == 0):
return np.median(np.square(estimated-ground_truth),axis=0)
else:
return np.median(np.square(estimated-ground_truth),axis=0) / np.std(ground_truth, axis=0)
elif metric == 'mrd':
non_zero_truth = ground_truth.copy()
non_zero_truth[non_zero_truth==0] = 1
mrd = np.abs(ground_truth - estimated) / non_zero_truth
return np.median(mrd)
# if (np.count_nonzero(np.linalg.norm(ground_truth)) == 0):
# return np.mean(np.linalg.norm(estimated-ground_truth))
# return np.mean(np.linalg.norm(estimated-ground_truth) / np.linalg.norm(ground_truth))
elif metric == 'RE':
return get_resolution_entropy(estimated,100)
elif metric == 'mean_arr':
try:
mean_arr = np.median(df['arr'])
return mean_arr
except:
return 0
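# Worked check of the 'mrd' branch above (hypothetical values): truth [1, 2, 4]
# versus estimates [1, 3, 2] gives relative differences [0, 0.5, 0.5], so the
# median relative difference is 0.5.
def _example_single_sample_mrd():
    truth = pd.Series([1.0, 2.0, 4.0])
    estimated = pd.Series([1.0, 3.0, 2.0])
    return get_single_sample_metric('mrd', truth, estimated)  # -> 0.5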
def get_multi_sample_metric(metric,df,ground_truth, estimated):
# if ((ground_truth is not None)):
# with open('/fs/ess/scratch/PCON0009/haoran/isoform_quantification/jobs/benchmark/df.pkl','wb') as f:
# pickle.dump((df,ground_truth,estimated),f)
if (ground_truth is not None):
# estimation_diff_expressed = df['alfc'] > np.quantile(df['alfc'],0.8)
# truth_diff_expressed = df['true_alfc'] > np.quantile(df['true_alfc'],0.8)
# estimation_diff_expressed = df['alfc'] > 1
# truth_diff_expressed = df['true_alfc'] > 1
estimation_diff_expressed = df['estimation_diff_expressed'].astype(int)
truth_diff_expressed = df['truth_diff_expressed'].astype(int)
else:
estimation_diff_expressed = df['estimation_diff_expressed'].astype(int)
# estimation_diff_expressed = df['alfc'] > np.quantile(df['alfc'],0.8)
# estimation_diff_expressed = df['alfc'] > 1
if metric == 'precision':
return precision_score(truth_diff_expressed,estimation_diff_expressed,zero_division=0)
elif metric == 'recall':
return recall_score(truth_diff_expressed,estimation_diff_expressed,zero_division=0)
elif metric == 'accuracy':
return accuracy_score(truth_diff_expressed,estimation_diff_expressed)
elif metric == 'auc':
try:
return roc_auc_score(truth_diff_expressed,estimation_diff_expressed)
except:
return 0
elif metric == 'f1':
return f1_score(truth_diff_expressed,estimation_diff_expressed,zero_division=0)
elif metric == 'spearmanr':
if (ground_truth.shape[0] == 1):
return 1 if ground_truth.values[0] == estimated.values[0] else 0
else:
return stats.spearmanr(ground_truth, estimated,axis=None).correlation
elif metric == 'nrmse':
return np.median(np.median(np.square(estimated-ground_truth),axis=0) / np.std(ground_truth, axis=0))
elif metric == 'mrd':
ground_truth = np.mean(ground_truth,axis=1)
estimated = np.mean(estimated,axis=1)
non_zero_truth = ground_truth.copy()
non_zero_truth += 0.01
# non_zero_truth[non_zero_truth==0] = 1
mrd = np.abs(ground_truth - estimated) / non_zero_truth
return np.mean(mrd)
# if (np.count_nonzero(np.linalg.norm(ground_truth)) == 0):
# return np.mean(np.linalg.norm(estimated-ground_truth))
# return np.mean(np.linalg.norm(estimated-ground_truth) / np.linalg.norm(ground_truth))
elif metric =='CM':
# return get_consistency_measures(estimated,np.median(estimated))
return get_consistency_measures(estimated,1)
elif metric == 'RM':
std_1,std_2 = df['std#1'].values,df['std#2'].values
return np.sqrt(np.mean([np.square(std_1),np.square(std_2)]))
elif metric == 'RE':
return np.median([get_resolution_entropy(estimated[:,i],100) for i in range(estimated.shape[1])])
elif metric == 'mean_arr':
arr_columns = [x for x in list(df.columns) if 'arr_' in x]
return np.median(df[arr_columns].values)
def preprocess_single_sample_util(df, kvalues_dict,num_exon_dict,isoform_length_dict,isoform_gene_dict,num_isoforms_dict):
df['estimated_abund'] = normalize(df['estimated_abund'])
df['true_abund'] = normalize(df['true_abund'])
df['std'] = (df['true_abund'] - df['estimated_abund']) / 2
df['arr'] = df['estimated_abund'] / df['true_abund']
df.loc[np.isinf(df['arr']),'arr'] = 0
df['log2_true_abund'] = np.log2(df['true_abund']+1)
df = df[df['true_abund'] > 0.5]
df = df.set_index('isoform').assign(K_value=pd.Series(kvalues_dict),num_exons=pd.Series(num_exon_dict),isoform_length=pd.Series(isoform_length_dict),gene=pd.Series(isoform_gene_dict),num_isoforms=pd.Series(num_isoforms_dict)).reset_index()
df = df.dropna()
return df
def preprocess_multi_sample_diff_condition_util(estimated_df,true_expression_df, kvalues_dict,num_exon_dict,isoform_length_dict,isoform_gene_dict,num_isoforms_dict):
condition_column_split_index = estimated_df.shape[1]//2
estimated_arr = estimated_df.drop(0,axis=1).to_numpy()
estimated_arr = normalize(estimated_arr)
    df = pd.DataFrame()  # api: pandas.DataFrame
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'han'
import os
import h5py
import math
import torch
import torch.utils.data
from torch.utils.data.sampler import Sampler, SequentialSampler
import logging
import pandas as pd
from dataset.preprocess_data import PreprocessData
from utils.functions import *
logger = logging.getLogger(__name__)
class SquadDataset:
"""
dataset module for SQuAD
"""
def __init__(self, global_config):
self._data = {}
self._attr = {}
self.meta_data = {}
self.global_config = global_config
        # check whether the preprocessed squad hdf5 dataset exists
is_exist_dataset_h5 = os.path.exists(self.global_config['data']['dataset_h5'])
assert is_exist_dataset_h5, 'not found dataset hdf5 file in %s' % self.global_config['data']['dataset_h5']
self._load_hdf5()
def _load_hdf5(self):
"""
load squad hdf5 file
:return:
"""
squad_h5_path = self.global_config['data']['dataset_h5']
with h5py.File(squad_h5_path, 'r') as f:
f_data = f['data']
for name in ['train', 'dev']:
self._data[name] = {}
for sub_name in ['answer_range', 'samples_id']:
self._data[name][sub_name] = np.array(f_data[name][sub_name])
for sub_name in ['context', 'question']:
cur_data = f_data[name][sub_name]
self._data[name][sub_name] = {}
# 'token', 'pos', 'ent', 'em', 'em_lemma', 'right_space'
for subsub_name in cur_data.keys():
self._data[name][sub_name][subsub_name] = np.array(cur_data[subsub_name])
for key, value in f.attrs.items():
self._attr[key] = value
# 'id2word', 'id2char', 'id2pos', 'id2ent'
for key in f['meta_data'].keys():
self.meta_data[key] = np.array(f['meta_data'][key])
self._char2id = dict(zip(self.meta_data['id2char'],
range(len(self.meta_data['id2char']))))
def get_dataloader_train(self, batch_size, num_workers):
"""
a train data dataloader
:param batch_size:
:return:
"""
return self.get_dataloader(batch_size, 'train', num_workers, shuffle=True)
def get_dataloader_dev(self, batch_size, num_workers):
"""
a dev data dataloader
:param batch_size:
:return:
"""
return self.get_dataloader(batch_size, 'dev', num_workers, shuffle=False)
def get_dataloader(self, batch_size, type, num_workers, shuffle):
"""
get dataloader on train or dev dataset
:param batch_size:
:param type: 'train' or 'dev'
:return:
"""
data = self._data[type]
dataset = CQA_Dataset(data['context'],
data['question'],
data['answer_range'],
self.meta_data,
self.global_config['preprocess'])
if shuffle:
sampler = SortedBatchSampler(dataset.get_lengths(), batch_size)
else:
sampler = SequentialSampler(dataset)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=sampler,
collate_fn=self.collect_fun,
num_workers=num_workers)
return dataloader
def collect_fun(self, batch):
"""
collect function for DataLoader, will generate char idx currently
:param batch:
:return:
"""
context = []
context_f = []
question = []
question_f = []
answer_range = []
for ele in batch:
context.append(ele[0])
question.append(ele[1])
context_f.append(ele[2])
question_f.append(ele[3])
answer_range.append(ele[4])
# word idx
bat_context, max_ct_len = del_zeros_right(torch.stack(context, dim=0))
bat_question, max_qt_len = del_zeros_right(torch.stack(question, dim=0))
bat_answer, _ = del_zeros_right(torch.stack(answer_range, dim=0))
# additional features
bat_context_f = None
bat_question_f = None
if context_f[0] is not None:
bat_context_f = torch.stack(context_f, dim=0)[:, 0:max_ct_len, :]
bat_question_f = torch.stack(question_f, dim=0)[:, 0:max_qt_len, :]
# generate char idx
bat_context_char = None
bat_question_char = None
if self.global_config['preprocess']['use_char']:
bat_context_char = self._batch_word_to_char(bat_context)
bat_question_char = self._batch_word_to_char(bat_question)
return bat_context, bat_question, bat_context_char, bat_question_char, bat_context_f, bat_question_f, bat_answer
def get_batch_train(self, batch_size):
"""
a train data batch
.. warning::
This method is now deprecated in favor of
:func:`get_dataloader_train`.
"""
return self.get_batch_data(batch_size, 'train')
def get_batch_dev(self, batch_size):
"""
development data batch
.. warning::
This method is now deprecated in favor of
:func:`get_dataloader_dev`.
"""
return self.get_batch_data(batch_size, 'dev')
def get_batch_data(self, batch_size, type):
"""
same with BatchSampler
.. warning::
This method is now deprecated in favor of
:func:`BatchSampler` and `get_dataloader`.
"""
data = self._data[type]
data_size = len(data['context'])
i = 0
while i < data_size:
j = min(i + batch_size, data_size)
bat = [data['context'][i:j], data['question'][i:j], data['answer_range'][i:j]]
bat_tensor = [to_long_tensor(x) for x in bat]
i = j
yield bat_tensor
def get_all_samples_id_train(self):
return self.get_all_samples_id('train')
def get_all_samples_id_dev(self):
return self.get_all_samples_id('dev')
def get_all_samples_id(self, type):
"""
get samples id of 'train' or 'dev' data
:param type:
:return:
"""
data = self._data[type]
return data['samples_id']
def get_all_ct_right_space_train(self):
return self.get_all_ct_right_space('train')
def get_all_ct_right_space_dev(self):
return self.get_all_ct_right_space('dev')
def get_all_ct_right_space(self, type):
data = self._data[type]
return data['context']['right_space']
def get_train_batch_cnt(self, batch_size):
"""
get count of train batches
:param batch_size: single batch size
:return: count
"""
data_size = self._attr['train_size']
cnt_batch = math.ceil(data_size * 1.0 / batch_size)
return cnt_batch
def get_dev_batch_cnt(self, batch_size):
"""
get count of dev batches
:param batch_size: single batch size
:return: count
"""
data_size = self._attr['dev_size']
cnt_batch = math.ceil(data_size * 1.0 / batch_size)
return cnt_batch
def _batch_word_to_char(self, batch_wordid):
"""
transform batch with sentence of wordid to batch data with sentence of char id
:param batch_wordid: (batch, seq_len), torch tensor
:return: (batch, seq_len, word_len), torch tensor
"""
batch_wordid = batch_wordid.numpy()
batch_word = [self.sentence_id2word(x) for x in batch_wordid]
batch_length = [[len(x) if x != PreprocessData.padding else 0 for x in s] for s in batch_word]
batch_max_len = np.max(batch_length)
batch_char = list(map(lambda x: self.sentence_char2id(x, max_len=batch_max_len), batch_word))
batch_char = np.stack(batch_char, axis=0)
return to_long_tensor(batch_char)
def gen_batch_with_char(self, batch_data, enable_char, device):
"""
        word batch to generate char batch, also move to device, used in train or valid steps
.. warning::
This method is now deprecated in favor of collect function in DataLoader
"""
batch_data = [del_zeros_right(x)[0] for x in batch_data]
if not enable_char:
bat_context, bat_question, bat_answer_range = [x.to(device) for x in batch_data]
bat_context_char = None
bat_question_char = None
else:
bat_context, bat_question, bat_answer_range = batch_data
bat_context_char = self._batch_word_to_char(bat_context)
bat_question_char = self._batch_word_to_char(bat_question)
bat_context, bat_question, bat_context_char, bat_question_char, bat_answer_range = [x.to(device) for x in
[bat_context,
bat_question,
bat_context_char,
bat_question_char,
bat_answer_range]]
return bat_context, bat_question, bat_context_char, bat_question_char, bat_answer_range
def sentence_id2word(self, s_id):
"""
transform a sentence with word id to a sentence with real word
:param s_id:
:return:
"""
s = map(lambda id: self.meta_data['id2word'][id], s_id)
return list(s)
def sentence_word2id(self, s):
"""
transform a sentence with word to a sentence with word id
(Note that it's a slow version when using np.where)
:param s:
:return:
"""
s_id = map(lambda word: np.where(self.meta_data['id2word'] == word)[0][0], s)
return np.array(list(s_id))
def word_id2char(self, w_id):
w = map(lambda id: self.meta_data['id2char'][id], w_id)
return list(w)
def word_char2id(self, w):
if w == PreprocessData.padding: # not actual word
            return np.ones(1, )  # ensure word length > 0 and valid encoding; any non-zero value works here
w_id = map(lambda ch: self._char2id[ch], w)
return np.array(list(w_id))
def sentence_char2id(self, s, max_len=None):
s_cid = list(map(lambda w: self.word_char2id(w), s))
if max_len is None:
word_len = list(map(lambda x: len(x), s_cid))
max_len = np.max(word_len)
s_cid_pad = map(lambda x: np.pad(x, (0, max_len - len(x)), 'constant', constant_values=(0, 0)), s_cid)
return np.stack(list(s_cid_pad), axis=0)
def gather_context_seq_len(self, type, steps=None):
"""
gather the context sequence counts with different lengths
:param type: 'train' or 'dev' data
:param steps: set to None means default steps
:return:
"""
data = self._data[type]
context = to_long_tensor(data['context']['token'])
mask = compute_mask(context)
lengths = mask.eq(1).long().sum(1).squeeze()
length_pd = pd.DataFrame(data=lengths.numpy(), columns=['length'])
if steps is None:
steps = [0, 100, 200, 300, 400, 500, 600, 700, 800]
assert len(steps) > 0
# get step length cnt
real_step = []
step_length_cnt = []
for i in range(1, len(steps)):
lower_bound = steps[i - 1]
upper_bound = steps[i]
assert lower_bound < upper_bound # [lower_bound, upper_bound)
real_step.append((lower_bound, upper_bound))
valid = length_pd[(length_pd['length'] < upper_bound) & (length_pd['length'] >= lower_bound)]
tmp_cnt = valid.shape[0]
step_length_cnt.append(tmp_cnt)
rtn_step_length = list(zip(real_step, step_length_cnt))
# get all length cnt
length_cnt = length_pd['length'].value_counts().to_frame(name='cnt')
length_cnt['length'] = length_cnt.index
return rtn_step_length, length_cnt
def gather_answer_seq_len(self, type, max_len=None):
"""
gather the answer sequence counts with different lengths
:param type: 'train' or 'dev' data
:param max_len:
:return:
"""
data = self._data[type]
answer_range = data['answer_range']
lengths = []
for i in range(answer_range.shape[0]):
tmp_lens = []
for j in range(int(answer_range.shape[1] / 2)):
if answer_range[i, j * 2] != -1:
tmp_lens.append(answer_range[i, j * 2 + 1] - answer_range[i, j * 2] + 1)
lengths.append(min(tmp_lens))
        length_pd = pd.DataFrame(data=lengths, columns=['length'])  # api: pandas.DataFrame
from __future__ import division
import time
from datetime import datetime
import sys
import numpy as np
import faiss
import pandas as pd
import os
'''
* Create a GitHub repo to house the code and results.
* Show results with different:
X vector length - 96, 300, 4096
* dataset vector count
* batch size
* k
* Card type
* Card count
* Distance metric
* Instance name
* Additionally, record in the .csv file:
* Total latency
* Total board memory
* NO - Hourly cost
* Experiment repetitions
* Dataset load time
* Measurement date
* Create a gaussian kernel regression model to predict performance using feature scaling.
'''
ngpus = [1, 8, 16]
vec_lengths = [96, 300, 4096]
test = {
# Specify properties of the GPU
'gpu_name': "<NAME>",
'n_gpu': 8,
'gpu_RAM': 8 * 12,
'faiss_v': '1.4.0',
'instance': 'p2.8xlarge',
# Specify some test properties
'metric': 'L2'
}
# These are all of the columns in our output. This array holds column order.
column_names = ['timestamp','gpu_name','n_gpu','metric',
'dataset_name','dataset_count', 'vec_len', 'dtype',
'batch_size','k',
'latency','load_time','max_latency','min_latency',
'repetitions','gpu_RAM','faiss_v','instance','notes']
# TODO
# DONE - 1x VSX Max
# DONE - 1x V100_f32 Max
# DONE - 1x V100_f16 Max
# DONE - 4x VSX Max
# DONE - 4x V100 Max f32 and f16
# DONE - 10x VSX Max
# DONE - 8x V100 f32 and f16 Max
# DONE - 16x K80 Max
dataset_sizes = [
( int(1e6), 96, '1M x 96'),
( int(10e6), 96, '10M x 96'),
( int(25.07e6), 96, '1x K80 Max Capacity'),
( int(33.43e6), 96, '1x V100 f32 Max Capacity'), # Max TBD
( int(50e6), 96, '50M x 96'),
( int(50.15e6), 96, '2x K80 Max Capacity'),
( int(66.86e6), 96, '1x V100 f16 Max Capacity'), # Max TBD
( int(100e6), 96, '100M x 96'),
    ( int(133.69e6), 96, '4x V100 f32 Max Capacity'), # Max TBD
( int(200e6), 96, '200M x 96'),
(int(200.54e6), 96, '8x K80 Max Capacity'),
(int(267.38e6), 96, '8x V100 f32 Max Capacity'), # Max TBD
(int(267.38e6), 96, '4x V100 f16 Max Capacity'), # Max TBD
( int(400e6), 96, '400M x 96'),
(int(401.08e6), 96, '16x K80 Max Capacity'),
(int(469.76e6), 96, '1x VSX Max Capacity'), # 469.762048e6
(int(534.76e6), 96, '8x V100 f16 Max Capacity'), # Max TBD
( int(1.879e9), 96, '4x VSX Max Capacity'), # 1.879048192e9
( int(4.697e9), 96, '10x VSX Max Capacity'), # 4.69762048e9
( int(1e6), 300, '1M x 300'),
( int(5e6), 300, '5M x 300'),
( int(8.08e6), 300, '1x K80 Max Capacity'),
( int(10e6), 300, '10M x 300'),
( int(10.77e6), 300, '1x V100 f32 Max Capacity'), # Max TBD
( int(21.54e6), 300, '1x V100 f16 Max Capacity'), # Max TBD
( int(43.08e6), 300, '4x V100 f32 Max Capacity'), # Max TBD
( int(50e6), 300, '50M x 300'),
( int(64.56e6), 300, '8x K80 Max Capacity'),
( int(86.16e6), 300, '4x V100 f16 Max Capacity'), # Max TBD
( int(86.16e6), 300, '8x V100 f32 Max Capacity'), # Max TBD
(int(129.12e6), 300, '16x K80 Max Capacity'),
(int(150.32e6), 300, '1x VSX Max Capacity'), # 150.323855e6
(int(172.32e6), 300, '8x V100 f16 Max Capacity'), # Max TBD
(int(601.28e6), 300, '4x VSX Max Capacity'), # ...
( int(1.503e9), 300, '10x VSX Max Capacity'), # 1.5032385536e9
( int(100e3), 4096, '100K x 4096'),
( int(590e3), 4096, '1x K80 Max Capacity'),
( int(780e3), 4096, '1x V100 f32 Max Capacity'),
( int(1e6), 4096, '1M x 4096'),
( int(1.56e6), 4096, '1x V100 f16 Max Capacity'),
( int(2e6), 4096, '2M x 4096'),
( int(3.12e6), 4096, '4x V100 f32 Max Capacity'), # Max TBD
( int(6.24e6), 4096, '4x V100 f16 Max Capacity'), # Max TBD
( int(6.24e6), 4096, '8x V100 f32 Max Capacity'), # Max TBD
( int(4.74e6), 4096, '8x K80 Max Capacity'),
( int(9.48e6), 4096, '16x K80 Max Capacity'),
( int(11.01e6), 4096, '1x VSX Max Capacity'), # 11.010048e6
( int(12.28e6), 4096, '8x V100 f16 Max Capacity'), # Max TBD
( int(44.04e6), 4096, '4x VSX Max Capacity'),
( int(110.1e6), 4096, '10x VSX Max Capacity'), # 110.10048e6
]
def get_max_capacity(test):
'''
This function looks up the maximum number of vectors that the current GPU
configuration can support at this vector length.
'''
# Select the trials matching this test's parameters.
df = pd.read_csv('max_dataset_size.csv')
df = df.loc[(df.gpu_name == test['gpu_name']) &
(df.n_gpu == test['n_gpu']) &
(df.gpu_RAM == test['gpu_RAM']) &
(df.faiss_v == test['faiss_v']) &
(df.vec_len == test['vec_len'])]
# Error if there's no matching dataset size experiments.
if df.empty or df.loc[df.success == True].empty:
print('ERROR - No capacity data for this configuration')
return -1
# Find the maximum successful size and the minimum fail size.
max_success = int(df.loc[df.success == True].vec_count.max())
min_fail = int(df.loc[df.success == False].vec_count.min())
# Warn if the capacity experiments haven't converged yet.
if (min_fail - max_success) > 10000:
print('WARNING: Capacity experiments not complete for %dx %s with length %d' % (test['n_gpu'], test['gpu_name'], test['vec_len']))
return max_success
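# Hedged usage sketch: the lookup above assumes 'max_dataset_size.csv' exists next to
# this script with columns gpu_name, n_gpu, gpu_RAM, faiss_v, vec_len, vec_count and
# success. The configuration values below are purely hypothetical.
def _example_get_max_capacity():
    probe = {'gpu_name': 'Tesla K80', 'n_gpu': 1, 'gpu_RAM': 12,
             'faiss_v': '1.4.0', 'vec_len': 96}
    return get_max_capacity(probe)  # largest successful vec_count, or -1 if no data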
batch_sizes = [1, 16, 64, 256, 1024]
ks = [1, 10, 100]
repetitions = 10
# Memory map the dataset files. We'll only load the portions that we need
# for each experiment into memory
vecs_mmap = {96: np.load('210000000_x_96.npy', mmap_mode='r'),
300: np.load('100000000_x_300.npy', mmap_mode='r'),
4096: np.load('5000000_x_4096.npy', mmap_mode='r')}
# For each dataset size...
for ds_shape in dataset_sizes:
print('===================================================')
print('Running benchmark with dataset %s x %d' % ("{:,}".format(ds_shape[0]), ds_shape[1]))
print('===================================================')
# Create a test object for storing all parameters and results of this experiment
test['dataset_count'] = ds_shape[0]
test['vec_len'] = ds_shape[1]
# Lookup the maximum supported capacity for the current GPU configuration.
max_count = get_max_capacity(test)
# Check if we can support this size.
if test['dataset_count'] > max_count:
print('Dataset size not supported, skipping.')
continue
# =====================================
# Generate Dataset
# =====================================
# Record the name of the dataset.
test['dataset_name'] = 'random'
test['notes'] = ds_shape[2]
generate = False
t0 = time.time()
if generate:
print('Generating dataset...')
sys.stdout.flush()
vecs = np.random.rand(ds_shape[0], ds_shape[1]).astype('float32')
else:
print('Loading dataset...')
sys.stdout.flush()
# Read the portion of the dataset that we need.
vecs = vecs_mmap[ds_shape[1]][0:ds_shape[0],:]
print(' Done. Took %.2f seconds.' % (time.time() - t0))
print('Dataset is [%d x %d]' % vecs.shape)
# =====================================
# FAISS Setup
# =====================================
# Build a flat (CPU) index
cpu_index = faiss.IndexFlatL2(vecs.shape[1])
# Print the number of available GPUs.
print('Number of available GPUs: %d Using: %d' % (faiss.get_num_gpus(), test['n_gpu']))
# Enable sharding so that the dataset is divided across the GPUs rather than
# replicated.
co = faiss.GpuMultipleClonerOptions()
co.shard = True
# FAISS uses 32-bit floats.
test['dtype'] = 'float32'
# Make it into a gpu index
gpu_index = faiss.index_cpu_to_all_gpus(cpu_index, co=co, ngpu=test['n_gpu'])
# Add vecs to our GPU index
print('Adding dataset to index...')
sys.stdout.flush()
t0 = time.time()
gpu_index.add(vecs)
elapsed = time.time() - t0
print('Building index took %.2f seconds' % (elapsed))
test['load_time'] = elapsed
# =====================================
# Benchmark
# =====================================
# Warm up query.
D, I = gpu_index.search(vecs[:16], k=100)
tests = []
# Run queries with different batch sizes and 'k' values.
for batch_size in batch_sizes:
for k in ks:
print('Running batch %d with k=%d...' % (batch_size, k))
sys.stdout.flush()
test["batch_size"] = batch_size
test["k"] = k
test["repetitions"] = repetitions
test['timestamp'] = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
# Run the query multiple times. We'll record the min, max, and
# average latency across the repetitions.
run_times = []
for run in range(repetitions):
t0 = time.time()
D, I = gpu_index.search(vecs[:batch_size], k)
run_times.append(time.time() - t0)
test['latency'] = np.mean(run_times)
test['min_latency'] = min(run_times)
test['max_latency'] = max(run_times)
print(' Min: %.3f sec, Max: %.3f, Avg: %.3f' % (min(run_times), max(run_times), np.mean(run_times)))
# Add the completed test to the list. Make a copy of the 'test'
# dictionary since we will re-use this object.
tests.append(test.copy())
# Append these results to the .csv file.
if os.path.isfile('benchmark_tests.csv'):
        df = pd.read_csv('benchmark_tests.csv')  # api: pandas.read_csv
import pandas as pd
import numpy as np
np.random.seed(99)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
from sklearn.multiclass import OneVsRestClassifier
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgbm
from sklearn.model_selection import KFold, cross_val_score,StratifiedKFold
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
import csv
import re
from xgboost import XGBRegressor, XGBClassifier
from sklearn.metrics import mean_squared_log_error, mean_squared_error,balanced_accuracy_score
from scipy import stats
from sklearn.model_selection import RandomizedSearchCV
import scipy as sp
import time
import copy
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from collections import Counter, defaultdict
import pdb
from tqdm.notebook import tqdm
from pathlib import Path
#sklearn data_preprocessing
from sklearn.preprocessing import StandardScaler, MinMaxScaler
#sklearn categorical encoding
import category_encoders as ce
#sklearn modelling
from sklearn.model_selection import KFold
from collections import Counter, defaultdict
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
# boosting library
import xgboost as xgb
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import copy
#################################################################################
#### Regression or Classification type problem
def analyze_problem_type(train, target, verbose=0) :
target = copy.deepcopy(target)
train = copy.deepcopy(train)
if isinstance(train, pd.Series):
train = pd.DataFrame(train)
### the number of categories cannot be more than 2% of train size ####
### this determines the number of categories above which integer target becomes a regression problem ##
cat_limit = int(train.shape[0]*0.02)
cat_limit = min(cat_limit, 100) ## anything over 100 categories is a regression problem ##
cat_limit = max(cat_limit, 10) ### anything above at least 10 categories is a Regression problem
float_limit = 15 ### number of categories a float target above which it becomes a Regression problem
if isinstance(target, str):
target = [target]
if len(target) == 1:
targ = target[0]
model_label = 'Single_Label'
else:
targ = target[0]
model_label = 'Multi_Label'
#### This is where you detect what kind of problem it is #################
if train[targ].dtype in ['int64', 'int32','int16','int8']:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 2 and len(train[targ].unique()) <= cat_limit:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype in ['float16','float32','float64','float']:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 2 and len(train[targ].unique()) <= float_limit:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
########### print this for the start of next step ###########
if verbose <= 1:
print('''#### %s %s Feature Selection Started ####''' %(
model_label,model_class))
return model_class
#####################################################################################
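# Hedged usage sketch (synthetic frame, not from the original module): an integer
# target with two unique values is reported as a binary problem; a continuous float
# target would fall through to 'Regression'.
def _example_analyze_problem_type():
    df = pd.DataFrame({'target': [0, 1, 0, 1, 1]})
    return analyze_problem_type(df, 'target', verbose=0)  # -> 'Binary_Classification'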
from sklearn.base import TransformerMixin, BaseEstimator
from collections import defaultdict
class My_LabelEncoder(BaseEstimator, TransformerMixin):
"""
################################################################################################
###### The My_LabelEncoder class works just like sklearn's Label Encoder but better! #######
##### It label encodes any object or category dtype in your dataset. It also handles NaN's.####
## The beauty of this function is that it takes care of encoding unknown (future) values. #####
##################### This is the BEST working version - don't mess with it!! ##################
################################################################################################
Usage:
le = My_LabelEncoder()
le.fit_transform(train[column]) ## this will give your transformed values as an array
le.transform(test[column]) ### this will give your transformed values as an array
Usage in Column Transformers and Pipelines:
    No. It cannot be used in pipelines since it needs to produce two columns for the next stage in the pipeline.
See my other module called My_LabelEncoder_Pipe() to see how it can be used in Pipelines.
"""
def __init__(self):
self.transformer = defaultdict(str)
self.inverse_transformer = defaultdict(str)
self.max_val = 0
def fit(self,testx, y=None):
## testx must still be a pd.Series for this encoder to work!
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
#### There is no way to transform dataframes since you will get a nested renamer error if you try ###
### But if it is a one-dimensional dataframe, convert it into a Series
#### Do not change this since I have tested it and it works.
if testx.shape[1] == 1:
testx = pd.Series(testx.values.ravel(),name=testx.columns[0])
else:
                #### Since it is multi-dimensional, just return the object as is
return self
ins = np.unique(testx.factorize()[1]).tolist()
outs = np.unique(testx.factorize()[0]).tolist()
#ins = testx.value_counts(dropna=False).index
if -1 in outs:
# it already has nan if -1 is in outs. No need to add it.
if not np.nan in ins:
ins.insert(0,np.nan)
self.transformer = dict(zip(ins,outs))
self.inverse_transformer = dict(zip(outs,ins))
return self
def transform(self, testx, y=None):
## testx must still be a pd.Series for this encoder to work!
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
testx = pd.Series(testx)
else:
#### There is no way to transform dataframes since you will get a nested renamer error if you try ###
### But if it is a one-dimensional dataframe, convert it into a Series
#### Do not change this since I have tested it and it works.
if testx.shape[1] == 1:
testx = pd.Series(testx.values.ravel(),name=testx.columns[0])
else:
                #### Since it is multi-dimensional, just return the data as is
#### Do not change this since I have tested it and it works.
return testx
### now convert the input to transformer dictionary values
new_ins = np.unique(testx.factorize()[1]).tolist()
missing = [x for x in new_ins if x not in self.transformer.keys()]
if len(missing) > 0:
for each_missing in missing:
self.transformer[each_missing] = int(self.max_val + 1)
self.inverse_transformer[int(self.max_val+1)] = each_missing
self.max_val = int(self.max_val+1)
else:
self.max_val = np.max(list(self.transformer.values()))
### To handle category dtype you must do the next step #####
#### Do not change this since I have tested it and it works.
testk = testx.map(self.transformer)
if testx.dtype not in [np.int16, np.int32, np.int64, float, bool, object]:
if testx.isnull().sum().sum() > 0:
fillval = self.transformer[np.nan]
testk = testk.cat.add_categories([fillval])
testk = testk.fillna(fillval)
testk = testk.astype(int)
return testk
else:
testk = testk.astype(int)
return testk
else:
outs = testx.map(self.transformer).values.astype(int)
return outs
def inverse_transform(self, testx, y=None):
### now convert the input to transformer dictionary values
if isinstance(testx, pd.Series):
outs = testx.map(self.inverse_transformer).values
elif isinstance(testx, np.ndarray):
outs = pd.Series(testx).map(self.inverse_transformer).values
else:
outs = testx[:]
return outs
#################################################################################
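# Small sketch of the unseen-category behaviour described in the docstring above
# (toy data, not part of the original module): values first seen at transform()
# time are assigned fresh integer codes instead of raising an error.
def _example_my_label_encoder():
    le = My_LabelEncoder()
    train_codes = le.fit_transform(pd.Series(['red', 'blue', 'red']))
    test_codes = le.transform(pd.Series(['blue', 'green']))  # 'green' gets a new code
    return train_codes, test_codes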
from sklearn.impute import SimpleImputer
def data_transform(X_train, Y_train, X_test="", Y_test="", modeltype='Classification',
multi_label=False, enc_method='label', scaler = StandardScaler()):
##### Use My_Label_Encoder to transform label targets if needed #####
if multi_label:
if modeltype != 'Regression':
targets = Y_train.columns
Y_train_encoded = copy.deepcopy(Y_train)
for each_target in targets:
if Y_train[each_target].dtype not in ['int64', 'int32','int16','int8', 'float16','float32','float64','float']:
mlb = My_LabelEncoder()
Y_train_encoded[each_target] = mlb.fit_transform(Y_train[each_target])
if not isinstance(Y_test, str):
Y_test_encoded= mlb.transform(Y_test)
else:
Y_test_encoded = copy.deepcopy(Y_test)
else:
Y_train_encoded = copy.deepcopy(Y_train)
Y_test_encoded = copy.deepcopy(Y_test)
else:
Y_train_encoded = copy.deepcopy(Y_train)
Y_test_encoded = copy.deepcopy(Y_test)
else:
if modeltype != 'Regression':
if Y_train.dtype not in ['int64', 'int32','int16','int8', 'float16','float32','float64','float']:
mlb = My_LabelEncoder()
Y_train_encoded= mlb.fit_transform(Y_train)
if not isinstance(Y_test, str):
Y_test_encoded= mlb.transform(Y_test)
else:
Y_test_encoded = copy.deepcopy(Y_test)
else:
Y_train_encoded = copy.deepcopy(Y_train)
Y_test_encoded = copy.deepcopy(Y_test)
else:
Y_train_encoded = copy.deepcopy(Y_train)
Y_test_encoded = copy.deepcopy(Y_test)
#### This is where we find datetime vars and convert them to strings ####
datetime_feats = X_train.select_dtypes(include='datetime').columns.tolist()
### if there are datetime values, convert them into features here ###
from .featurewiz import FE_create_time_series_features
for date_col in datetime_feats:
fillnum = X_train[date_col].mode()[0]
X_train[date_col].fillna(fillnum,inplace=True)
X_train, ts_adds = FE_create_time_series_features(X_train, date_col)
if not isinstance(X_test, str):
X_test[date_col].fillna(fillnum,inplace=True)
X_test, _ = FE_create_time_series_features(X_test, date_col, ts_adds)
print(' Adding time series features from %s to data...' %date_col)
####### Set up feature to encode ####################
##### First make sure that the originals are not modified ##########
X_train_encoded = copy.deepcopy(X_train)
X_test_encoded = copy.deepcopy(X_test)
feature_to_encode = X_train.select_dtypes(include='object').columns.tolist(
)+X_train.select_dtypes(include='category').columns.tolist()
#### Do label encoding now #################
if enc_method == 'label':
for feat in feature_to_encode:
            # Initialize the encoder model
lbEncoder = My_LabelEncoder()
fillnum = X_train[feat].mode()[0]
X_train[feat].fillna(fillnum,inplace=True)
# fit the train data
lbEncoder.fit(X_train[feat])
# transform training set
X_train_encoded[feat] = lbEncoder.transform(X_train[feat])
# transform test set
if not isinstance(X_test_encoded, str):
X_test[feat].fillna(fillnum,inplace=True)
X_test_encoded[feat] = lbEncoder.transform(X_test[feat])
elif enc_method == 'glmm':
# Initialize the encoder model
GLMMEncoder = ce.glmm.GLMMEncoder(verbose=0 ,binomial_target=False)
# fit the train data
GLMMEncoder.fit(X_train[feature_to_encode],Y_train_encoded)
# transform training set ####
X_train_encoded[feature_to_encode] = GLMMEncoder.transform(X_train[feature_to_encode])
# transform test set
if not isinstance(X_test_encoded, str):
X_test_encoded[feature_to_encode] = GLMMEncoder.transform(X_test[feature_to_encode])
else:
print('No encoding transform performed')
### make sure there are no missing values ###
try:
imputer = SimpleImputer(strategy='constant', fill_value=0, verbose=0, add_indicator=True)
imputer.fit_transform(X_train_encoded)
if not isinstance(X_test_encoded, str):
imputer.transform(X_test_encoded)
except:
X_train_encoded = X_train_encoded.fillna(0)
if not isinstance(X_test_encoded, str):
X_test_encoded = X_test_encoded.fillna(0)
# fit the scaler to the entire train and transform the test set
scaler.fit(X_train_encoded)
# transform training set
X_train_scaled = pd.DataFrame(scaler.transform(X_train_encoded),
columns=X_train_encoded.columns, index=X_train_encoded.index)
# transform test set
if not isinstance(X_test_encoded, str):
X_test_scaled = pd.DataFrame(scaler.transform(X_test_encoded),
columns=X_test_encoded.columns, index=X_test_encoded.index)
else:
X_test_scaled = ""
return X_train_scaled, Y_train_encoded, X_test_scaled, Y_test_encoded
##################################################################################
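# Hedged usage sketch (synthetic data): label-encode the object column, scale with the
# default StandardScaler and get back transformed train/test frames plus the encoded
# target. Note that calling data_transform assumes the featurewiz package context,
# since it performs a relative import internally.
def _example_data_transform():
    X_tr = pd.DataFrame({'color': ['red', 'blue', 'red', 'blue'],
                         'size': [1.0, 2.0, 3.0, 4.0]})
    y_tr = pd.Series(['yes', 'no', 'yes', 'no'], name='target')
    X_te = pd.DataFrame({'color': ['blue', 'red'], 'size': [2.5, 3.5]})
    return data_transform(X_tr, y_tr, X_test=X_te, modeltype='Binary_Classification',
                          multi_label=False, enc_method='label')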
from sklearn.model_selection import KFold, cross_val_score,StratifiedKFold
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
import csv
import re
from xgboost import XGBRegressor, XGBClassifier
from sklearn.metrics import mean_squared_log_error, mean_squared_error,balanced_accuracy_score
from scipy import stats
from sklearn.model_selection import RandomizedSearchCV
import scipy as sp
import time
##################################################################################
import lightgbm as lgbm
def lightgbm_model_fit(random_search_flag, x_train, y_train, x_test, y_test, modeltype,
multi_label, log_y, model=""):
start_time = time.time()
if multi_label:
rand_params = {
}
else:
rand_params = {
'learning_rate': sp.stats.uniform(scale=1),
'num_leaves': sp.stats.randint(20, 100),
'n_estimators': sp.stats.randint(100,500),
"max_depth": sp.stats.randint(3, 15),
}
if modeltype == 'Regression':
lgb = lgbm.LGBMRegressor()
objective = 'regression'
metric = 'rmse'
is_unbalance = False
class_weight = None
score_name = 'Score'
else:
if modeltype =='Binary_Classification':
lgb = lgbm.LGBMClassifier()
objective = 'binary'
metric = 'auc'
is_unbalance = True
class_weight = None
score_name = 'ROC AUC'
num_class = 1
else:
lgb = lgbm.LGBMClassifier()
objective = 'multiclass'
#objective = 'multiclassova'
metric = 'multi_logloss'
is_unbalance = True
class_weight = 'balanced'
score_name = 'Multiclass Logloss'
if multi_label:
if isinstance(y_train, np.ndarray):
num_class = np.unique(y_train).max() + 1
else:
num_class = y_train.nunique().max()
else:
if isinstance(y_train, np.ndarray):
num_class = np.unique(y_train).max() + 1
else:
num_class = y_train.nunique()
early_stopping_params={"early_stopping_rounds":10,
"eval_metric" : metric,
"eval_set" : [[x_test, y_test]],
}
if modeltype == 'Regression':
## there is no num_class in regression for LGBM model ##
lgbm_params = {'learning_rate': 0.001,
'objective': objective,
'metric': metric,
'boosting_type': 'gbdt',
'max_depth': 8,
'subsample': 0.2,
'colsample_bytree': 0.3,
'reg_alpha': 0.54,
'reg_lambda': 0.4,
'min_split_gain': 0.7,
'min_child_weight': 26,
'num_leaves': 32,
'save_binary': True,
'seed': 1337, 'feature_fraction_seed': 1337,
'bagging_seed': 1337, 'drop_seed': 1337,
'data_random_seed': 1337,
'verbose': -1,
'n_estimators': 400,
}
else:
lgbm_params = {'learning_rate': 0.001,
'objective': objective,
'metric': metric,
'boosting_type': 'gbdt',
'max_depth': 8,
'subsample': 0.2,
'colsample_bytree': 0.3,
'reg_alpha': 0.54,
'reg_lambda': 0.4,
'min_split_gain': 0.7,
'min_child_weight': 26,
'num_leaves': 32,
'save_binary': True,
'seed': 1337, 'feature_fraction_seed': 1337,
'bagging_seed': 1337, 'drop_seed': 1337,
'data_random_seed': 1337,
'verbose': -1,
'num_class': num_class,
'is_unbalance': is_unbalance,
'class_weight': class_weight,
'n_estimators': 400,
}
lgb.set_params(**lgbm_params)
if multi_label:
if modeltype == 'Regression':
lgb = MultiOutputRegressor(lgb)
else:
lgb = MultiOutputClassifier(lgb)
######## Now let's perform randomized search to find best hyper parameters ######
if random_search_flag:
if modeltype == 'Regression':
scoring = 'neg_mean_squared_error'
else:
scoring = 'precision'
model = RandomizedSearchCV(lgb,
param_distributions = rand_params,
n_iter = 10,
return_train_score = True,
random_state = 99,
n_jobs=-1,
cv = 3,
refit=True,
scoring = scoring,
verbose = False)
##### This is where we search for hyper params for model #######
if multi_label:
model.fit(x_train, y_train)
else:
model.fit(x_train, y_train, **early_stopping_params)
print('Time taken for Hyper Param tuning of LGBM (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
cv_results = pd.DataFrame(model.cv_results_)
if modeltype == 'Regression':
print('Mean cross-validated train %s = %0.04f' %(score_name, np.sqrt(abs(cv_results['mean_train_score'].mean()))))
print('Mean cross-validated test %s = %0.04f' %(score_name, np.sqrt(abs(cv_results['mean_test_score'].mean()))))
else:
            print('Mean cross-validated train %s = %0.04f' %(score_name, cv_results['mean_train_score'].mean()))
print('Mean cross-validated test %s = %0.04f' %(score_name, cv_results['mean_test_score'].mean()))
else:
try:
model.fit(x_train, y_train, verbose=-1)
except:
print('lightgbm model is crashing. Please check your inputs and try again...')
return model
##############################################################################################
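# Hedged sketch (synthetic data, exact behaviour depends on the installed lightgbm
# version): run the routine above with random_search_flag=True to tune
# hyper-parameters on a small random-split validation set.
def _example_lightgbm_model_fit():
    X = pd.DataFrame(np.random.rand(200, 5), columns=list('abcde'))
    y = pd.Series(np.random.randint(0, 2, 200))
    x_tr, x_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    return lightgbm_model_fit(True, x_tr, y_tr, x_te, y_te,
                              modeltype='Binary_Classification',
                              multi_label=False, log_y=False)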
def complex_XGBoost_model(X_train, y_train, X_test, log_y=False, GPU_flag=False,
scaler = '', enc_method='label', n_splits=5, verbose=-1):
"""
    This model is called complex because it handles multi-label, multi-class datasets which XGBoost ordinarily can't.
Just send in X_train, y_train and what you want to predict, X_test
It will automatically split X_train into multiple folds (10) and train and predict each time on X_test.
It will then use average (or use mode) to combine the results and give you a y_test.
It will automatically detect modeltype as "Regression" or 'Classification'
It will also add MultiOutputClassifier and MultiOutputRegressor to multi_label problems.
The underlying estimators in all cases is XGB. So you get the best of both worlds.
Inputs:
------------
X_train: pandas dataframe only: do not send in numpy arrays. This is the X_train of your dataset.
y_train: pandas Series or DataFrame only: do not send in numpy arrays. This is the y_train of your dataset.
X_test: pandas dataframe only: do not send in numpy arrays. This is the X_test of your dataset.
log_y: default = False: If True, it means use the log of the target variable "y" to train and test.
GPU_flag: if your machine has a GPU set this flag and it will use XGBoost GPU to speed up processing.
scaler : default is empty string which means to use StandardScaler.
                But you can explicitly send in 'minmax' to select MinMaxScaler().
Alternatively, you can send in a scaler object that you define here: MaxAbsScaler(), etc.
    enc_method: default is 'label' encoding. You can choose 'glmm' as an alternative; those are the only two.
    verbose: default = -1. Choosing 1 will give you a lot more output.
Outputs:
------------
    y_preds: Predicted values for your X_test dataframe,
        produced by the tuned model after it is retrained on the full training data.
"""
X_XGB = copy.deepcopy(X_train)
Y_XGB = copy.deepcopy(y_train)
X_XGB_test = copy.deepcopy(X_test)
####################################
start_time = time.time()
top_num = 10
num_boost_round = 400
if isinstance(Y_XGB, pd.Series):
targets = [Y_XGB.name]
else:
targets = Y_XGB.columns.tolist()
if len(targets) == 1:
multi_label = False
if isinstance(Y_XGB, pd.DataFrame):
Y_XGB = pd.Series(Y_XGB.values.ravel(),name=targets[0], index=Y_XGB.index)
else:
multi_label = True
modeltype = analyze_problem_type(Y_XGB, targets)
columns = X_XGB.columns
###################################################################################
######### S C A L E R P R O C E S S I N G B E G I N S ############
###################################################################################
if isinstance(scaler, str):
if not scaler == '':
scaler = scaler.lower()
if scaler == 'standard':
scaler = StandardScaler()
elif scaler == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
else:
scaler = StandardScaler()
else:
pass
######### G P U P R O C E S S I N G B E G I N S ############
###### This is where we set the CPU and GPU parameters for XGBoost
if GPU_flag:
GPU_exists = check_if_GPU_exists()
else:
GPU_exists = False
##### Set the Scoring Parameters here based on each model and preferences of user ###
cpu_params = {}
param = {}
cpu_params['tree_method'] = 'hist'
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
if GPU_exists:
param['tree_method'] = 'gpu_hist'
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
print(' Hyper Param Tuning XGBoost with GPU parameters. This will take time. Please be patient...')
else:
param = copy.deepcopy(cpu_params)
print(' Hyper Param Tuning XGBoost with CPU parameters. This will take time. Please be patient...')
#################################################################################
if modeltype == 'Regression':
if log_y:
Y_XGB.loc[Y_XGB==0] = 1e-15 ### just set something that is zero to a very small number
######### Now set the number of rows we need to tune hyper params ###
scoreFunction = { "precision": "precision_weighted","recall": "recall_weighted"}
random_search_flag = True
#### We need a small validation data set for hyper-param tuning #########################
hyper_frac = 0.2
#### now select a random sample from X_XGB ##
if modeltype == 'Regression':
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999)
else:
try:
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999, stratify = Y_XGB)
except:
## In some small cases there are too few samples to stratify hence just split them as is
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999)
###### This step is needed for making sure y is transformed to log_y ####################
if modeltype == 'Regression' and log_y:
Y_train = np.log(Y_train)
Y_valid = np.log(Y_valid)
#### First convert test data into numeric using train data ###
X_train, Y_train, X_valid, Y_valid = data_transform(X_train, Y_train, X_valid, Y_valid,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
###### Time to hyper-param tune model using randomizedsearchcv and partial train data #########
num_boost_round = xgbm_model_fit(random_search_flag, X_train, Y_train, X_valid, Y_valid, modeltype,
multi_label, log_y, num_boost_round=num_boost_round)
#### First convert test data into numeric using train data ###############################
if not isinstance(X_XGB_test, str):
x_train, y_train, x_test, _ = data_transform(X_XGB, Y_XGB, X_XGB_test, "",
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
###### Time to train the hyper-tuned model on full train data ##########################
random_search_flag = False
model = xgbm_model_fit(random_search_flag, x_train, y_train, x_test, "", modeltype,
multi_label, log_y, num_boost_round=num_boost_round)
############# Time to get feature importances based on full train data ################
if multi_label:
for i,target_name in enumerate(targets):
each_model = model.estimators_[i]
imp_feats = dict(zip(x_train.columns, each_model.feature_importances_))
importances = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].values
important_features = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()
print('Top 10 features for {}: {}'.format(target_name, important_features))
else:
imp_feats = model.get_score(fmap='', importance_type='gain')
importances = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].values
important_features = pd.Series(imp_feats).sort_values(ascending=False)[:top_num].index.tolist()
print('Top 10 features:\n%s' %important_features[:top_num])
####### order this in the same order in which they were collected ######
feature_importances = pd.DataFrame(importances,
index = important_features,
columns=['importance'])
###### Time to consolidate the predictions on test data ################################
if not multi_label and not isinstance(X_XGB_test, str):
x_test = xgb.DMatrix(x_test)
if isinstance(X_XGB_test, str):
print('No predictions since X_XGB_test is empty string. Returning...')
return {}
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
if log_y:
pred_xgbs = np.exp(model.predict(x_test))
else:
pred_xgbs = model.predict(x_test)
#### if there is no test data just return empty strings ###
else:
pred_xgbs = []
else:
if multi_label:
pred_xgbs = model.predict(x_test)
pred_probas = model.predict_proba(x_test)
else:
pred_probas = model.predict(x_test)
if modeltype =='Multi_Classification':
pred_xgbs = pred_probas.argmax(axis=1)
else:
pred_xgbs = (pred_probas>0.5).astype(int)
##### once the entire model is trained on full train data ##################
print(' Time taken for training XGBoost on entire train data (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
if multi_label:
for i,target_name in enumerate(targets):
each_model = model.estimators_[i]
xgb.plot_importance(each_model, importance_type='gain', max_num_features=top_num,
title='XGBoost model feature importances for %s' %target_name)
else:
xgb.plot_importance(model, importance_type='gain', max_num_features=top_num,
title='XGBoost final model feature importances')
print('Returning the following:')
print(' Model = %s' %model)
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
print(' final predictions', pred_xgbs[:10])
return (pred_xgbs, model)
else:
if not isinstance(X_XGB_test, str):
print(' final predictions (may need to be transformed to original labels)', pred_xgbs[:10])
print(' predicted probabilities', pred_probas[:1])
return (pred_xgbs, pred_probas, model)
##############################################################################################
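##### Editor's sketch (not part of the original library): one hedged way to call
##### complex_XGBoost_model defined above. The toy DataFrame and column names are
##### assumptions for illustration; use your own pandas data in practice.
def _example_complex_xgboost_usage():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(300, 4), columns=['f1', 'f2', 'f3', 'f4'])
    y = pd.Series((X['f1'] + X['f2'] > 1).astype(int), name='target')
    X_tr, X_te, y_tr = X.iloc[:250], X.iloc[250:], y.iloc[:250]
    ### classification -> returns (predictions, predicted probabilities, fitted model) ###
    preds, probas, model = complex_XGBoost_model(X_tr, y_tr, X_te,
                                                 scaler='minmax', enc_method='label')
    return preds, probas, model
##############################################################################################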
import xgboost as xgb
def xgbm_model_fit(random_search_flag, x_train, y_train, x_test, y_test, modeltype,
multi_label, log_y, num_boost_round=100):
start_time = time.time()
if multi_label and not random_search_flag:
model = num_boost_round
else:
rand_params = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 100),
'n_estimators': sp.stats.randint(100,500),
"max_depth": sp.stats.randint(3, 15),
}
if modeltype == 'Regression':
objective = 'reg:squarederror'
eval_metric = 'rmse'
shuffle = False
stratified = False
num_class = 0
score_name = 'Score'
scale_pos_weight = 1
else:
if modeltype =='Binary_Classification':
objective='binary:logistic'
                eval_metric = 'auc' ## don't change this. AUC works well.
shuffle = True
stratified = True
num_class = 1
score_name = 'AUC'
scale_pos_weight = get_scale_pos_weight(y_train)
else:
objective = 'multi:softprob'
                eval_metric = 'auc' ## don't change this. AUC works well for now.
shuffle = True
stratified = True
if multi_label:
num_class = y_train.nunique().max()
else:
if isinstance(y_train, np.ndarray):
num_class = np.unique(y_train).max() + 1
elif isinstance(y_train, pd.Series):
num_class = y_train.nunique()
else:
num_class = y_train.nunique().max()
score_name = 'Multiclass AUC'
scale_pos_weight = 1 ### use sample_weights in multi-class settings ##
######################################################
final_params = {
'booster' :'gbtree',
'colsample_bytree': 0.5,
'alpha': 0.015,
'gamma': 4,
'learning_rate': 0.01,
'max_depth': 8,
'min_child_weight': 2,
'reg_lambda': 0.5,
'subsample': 0.7,
'random_state': 99,
'objective': objective,
'eval_metric': eval_metric,
'verbosity': 0,
'n_jobs': -1,
'scale_pos_weight':scale_pos_weight,
'num_class': num_class,
'silent': True
}
####### This is where we split into single and multi label ############
if multi_label:
###### This is for Multi_Label problems ############
rand_params = {'estimator__learning_rate':[0.1, 0.5, 0.01, 0.05],
'estimator__n_estimators':[50, 100, 150, 200, 250],
'estimator__gamma':[2, 4, 8, 16, 32],
'estimator__max_depth':[3, 5, 8, 12],
}
if random_search_flag:
if modeltype == 'Regression':
clf = XGBRegressor(n_jobs=-1, random_state=999, max_depth=6)
clf.set_params(**final_params)
model = MultiOutputRegressor(clf, n_jobs=-1)
else:
clf = XGBClassifier(n_jobs=-1, random_state=999, max_depth=6)
clf.set_params(**final_params)
model = MultiOutputClassifier(clf, n_jobs=-1)
if modeltype == 'Regression':
scoring = 'neg_mean_squared_error'
else:
scoring = 'precision'
model = RandomizedSearchCV(model,
param_distributions = rand_params,
n_iter = 15,
return_train_score = True,
random_state = 99,
n_jobs=-1,
cv = 3,
refit=True,
scoring = scoring,
verbose = False)
model.fit(x_train, y_train)
print('Time taken for Hyper Param tuning of multi_label XGBoost (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
cv_results = pd.DataFrame(model.cv_results_)
if modeltype == 'Regression':
print('Mean cross-validated train %s = %0.04f' %(score_name, np.sqrt(abs(cv_results['mean_train_score'].mean()))))
print('Mean cross-validated test %s = %0.04f' %(score_name, np.sqrt(abs(cv_results['mean_test_score'].mean()))))
else:
                print('Mean cross-validated train %s = %0.04f' %(score_name, cv_results['mean_train_score'].mean()))
print('Mean cross-validated test %s = %0.04f' %(score_name, cv_results['mean_test_score'].mean()))
### In this case, there is no boost rounds so just return the default num_boost_round
return model.best_estimator_
else:
try:
model.fit(x_train, y_train)
except:
print('Multi_label XGBoost model is crashing during training. Please check your inputs and try again...')
return model
else:
#### This is for Single Label Problems #############
if modeltype == 'Multi_Classification':
wt_array = get_sample_weight_array(y_train)
dtrain = xgb.DMatrix(x_train, label=y_train, weight=wt_array)
else:
dtrain = xgb.DMatrix(x_train, label=y_train)
######## Now let's perform randomized search to find best hyper parameters ######
if random_search_flag:
cv_results = xgb.cv(final_params, dtrain, num_boost_round=400, nfold=5,
stratified=stratified, metrics=eval_metric, early_stopping_rounds=10, seed=999, shuffle=shuffle)
# Update best eval_metric
best_eval = 'test-'+eval_metric+'-mean'
if modeltype == 'Regression':
mean_mae = cv_results[best_eval].min()
boost_rounds = cv_results[best_eval].argmin()
else:
mean_mae = cv_results[best_eval].max()
boost_rounds = cv_results[best_eval].argmax()
print("Cross-validated %s = %0.3f in num rounds = %s" %(score_name, mean_mae, boost_rounds))
print('Time taken for Hyper Param tuning of XGBoost (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
return boost_rounds
else:
try:
model = xgb.train(
final_params,
dtrain,
num_boost_round=num_boost_round,
verbose_eval=False,
)
except:
print('XGBoost model is crashing. Please check your inputs and try again...')
return model
####################################################################################
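####### Editor's sketch (not part of the original library): the core xgb.cv pattern
####### used in xgbm_model_fit above -- cross-validate with early stopping to pick the
####### number of boosting rounds, then train a final booster. Data and parameter
####### values are illustrative assumptions.
def _example_xgb_cv_then_train():
    import numpy as np
    import xgboost as xgb
    rng = np.random.RandomState(0)
    X = rng.rand(400, 5)
    y = (X[:, 0] + X[:, 1] > 1).astype(int)
    dtrain = xgb.DMatrix(X, label=y)
    params = {'objective': 'binary:logistic', 'eval_metric': 'auc',
              'max_depth': 4, 'learning_rate': 0.1}
    cv_results = xgb.cv(params, dtrain, num_boost_round=200, nfold=5,
                        stratified=True, metrics='auc',
                        early_stopping_rounds=10, seed=999)
    best_rounds = int(cv_results['test-auc-mean'].argmax())  # use argmin for error metrics
    booster = xgb.train(params, dtrain, num_boost_round=max(best_rounds, 1))
    return booster
####################################################################################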
# Calculate class weight
from sklearn.utils.class_weight import compute_class_weight
import copy
from collections import Counter
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints class percentages count of target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
sorted_keys = sorted(counts.keys())
for cls in sorted_keys:
print("%12s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
###################################################################################
def get_sample_weight_array(y_train):
y_train = copy.deepcopy(y_train)
if isinstance(y_train, np.ndarray):
y_train = pd.Series(y_train)
elif isinstance(y_train, pd.Series):
pass
elif isinstance(y_train, pd.DataFrame):
        ### if it is a dataframe, use it only if it is a one-column dataframe ##
y_train = y_train.iloc[:,0]
else:
### if you cannot detect the type or if it is a multi-column dataframe, ignore it
return None
classes = np.unique(y_train)
class_weights = compute_class_weight('balanced', classes=classes, y=y_train)
if len(class_weights[(class_weights < 1)]) > 0:
### if the weights are less than 1, then divide them until the lowest weight is 1.
class_weights = class_weights/min(class_weights)
else:
class_weights = (class_weights)
### even after you change weights if they are all below 1.5 do this ##
#if (class_weights<=1.5).all():
# class_weights = np.around(class_weights+0.49)
class_weights = class_weights.astype(int)
wt = dict(zip(classes, class_weights))
### Map class weights to corresponding target class values
### You have to make sure class labels have range (0, n_classes-1)
wt_array = y_train.map(wt)
#set(zip(y_train, wt_array))
# Convert wt series to wt array
wt_array = wt_array.values
return wt_array
###############################################################################
from collections import OrderedDict
def get_scale_pos_weight(y_input):
y_input = copy.deepcopy(y_input)
if isinstance(y_input, np.ndarray):
y_input = pd.Series(y_input)
elif isinstance(y_input, pd.Series):
pass
elif isinstance(y_input, pd.DataFrame):
        ### if it is a dataframe, use it only if it is a one-column dataframe ##
y_input = y_input.iloc[:,0]
else:
### if you cannot detect the type or if it is a multi-column dataframe, ignore it
return None
classes = np.unique(y_input)
rare_class = find_rare_class(y_input)
xp = Counter(y_input)
class_weights = compute_class_weight('balanced', classes=classes, y=y_input)
if len(class_weights[(class_weights < 1)]) > 0:
### if the weights are less than 1, then divide them until the lowest weight is 1.
class_weights = class_weights/min(class_weights)
else:
class_weights = (class_weights)
### even after you change weights if they are all below 1.5 do this ##
#if (class_weights<=1.5).all():
# class_weights = np.around(class_weights+0.49)
class_weights = class_weights.astype(int)
class_weights[(class_weights<1)]=1
class_rows = class_weights*[xp[x] for x in classes]
class_rows = class_rows.astype(int)
class_weighted_rows = dict(zip(classes,class_weights))
rare_class_weight = class_weighted_rows[rare_class]
print(' For class %s, weight = %s' %(rare_class, rare_class_weight))
return rare_class_weight
############################################################################################
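###### Editor's sketch (not part of the original library): a tiny worked example of
###### the two helpers above on an assumed 90/10 imbalanced target. The balanced class
###### weights come out to roughly {0: 1, 1: 9}, so the rare class (1) gets a
###### scale_pos_weight of about 9.
def _example_rare_class_weighting():
    import pandas as pd
    y = pd.Series([0] * 90 + [1] * 10)
    rare = find_rare_class(y, verbose=1)   # prints class percentages and returns 1
    spw = get_scale_pos_weight(y)          # prints and returns the rare-class weight (~9)
    return rare, spw
############################################################################################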
def xgboost_model_fit(model, x_train, y_train, x_test, y_test, modeltype, log_y, params,
cpu_params, early_stopping_params={}):
early_stopping = 10
start_time = time.time()
if str(model).split("(")[0] == 'RandomizedSearchCV':
model.fit(x_train, y_train, **early_stopping_params)
print('Time taken for Hyper Param tuning of XGB (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
else:
try:
if modeltype == 'Regression':
if log_y:
model.fit(x_train, np.log(y_train), early_stopping_rounds=early_stopping, eval_metric=['rmse'],
eval_set=[(x_test, np.log(y_test))], verbose=0)
else:
model.fit(x_train, y_train, early_stopping_rounds=early_stopping, eval_metric=['rmse'],
eval_set=[(x_test, y_test)], verbose=0)
else:
if modeltype == 'Binary_Classification':
objective='binary:logistic'
eval_metric = 'auc'
else:
objective='multi:softprob'
eval_metric = 'auc'
model.fit(x_train, y_train, early_stopping_rounds=early_stopping, eval_metric = eval_metric,
eval_set=[(x_test, y_test)], verbose=0)
except:
print('GPU is present but not turned on. Please restart after that. Currently using CPU...')
if str(model).split("(")[0] == 'RandomizedSearchCV':
                xgb = model.estimator
xgb.set_params(**cpu_params)
if modeltype == 'Regression':
scoring = 'neg_mean_squared_error'
else:
scoring = 'precision'
model = RandomizedSearchCV(xgb,
param_distributions = params,
n_iter = 15,
n_jobs=-1,
cv = 3,
scoring=scoring,
refit=True,
)
model.fit(x_train, y_train, **early_stopping_params)
return model
else:
model = model.set_params(**cpu_params)
if modeltype == 'Regression':
if log_y:
model.fit(x_train, np.log(y_train), early_stopping_rounds=6, eval_metric=['rmse'],
eval_set=[(x_test, np.log(y_test))], verbose=0)
else:
model.fit(x_train, y_train, early_stopping_rounds=6, eval_metric=['rmse'],
eval_set=[(x_test, y_test)], verbose=0)
else:
model.fit(x_train, y_train, early_stopping_rounds=6,eval_metric=eval_metric,
eval_set=[(x_test, y_test)], verbose=0)
return model
#################################################################################
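###### Editor's sketch (not part of the original library): the early-stopping fit used
###### by xgboost_model_fit above, in isolation. It assumes an xgboost version whose
###### sklearn fit() still accepts early_stopping_rounds / eval_metric, as the code
###### above does; newer releases move these to the estimator constructor. Data is an
###### illustrative assumption.
def _example_xgb_early_stopping_fit():
    import numpy as np
    from xgboost import XGBRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(300, 5)
    y = 2 * X[:, 0] + 0.1 * rng.rand(300)
    X_tr, X_va, y_tr, y_va = X[:240], X[240:], y[:240], y[240:]
    model = XGBRegressor(n_estimators=500, learning_rate=0.05, max_depth=4)
    model.fit(X_tr, y_tr, early_stopping_rounds=10, eval_metric=['rmse'],
              eval_set=[(X_va, y_va)], verbose=0)
    return model
#################################################################################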
def simple_XGBoost_model(X_train, y_train, X_test, log_y=False, GPU_flag=False,
scaler = '', enc_method='label', n_splits=5, verbose=0):
"""
    Easy to use XGBoost model. Just send in X_train, y_train and what you want to predict, X_test.
    It will cross-validate X_train over multiple folds (n_splits) and report the fold scores,
    then retrain the tuned model on the full training data and predict on X_test.
    The modeltype ("Regression" or 'Classification') is detected automatically.
Inputs:
------------
X_train: pandas dataframe only: do not send in numpy arrays. This is the X_train of your dataset.
y_train: pandas Series or DataFrame only: do not send in numpy arrays. This is the y_train of your dataset.
X_test: pandas dataframe only: do not send in numpy arrays. This is the X_test of your dataset.
    log_y: default = False: If True, train on the log of the target variable "y" and convert predictions back.
    GPU_flag: if your machine has a GPU, set this flag and XGBoost will use the GPU to speed up processing.
    scaler : default is '' which selects StandardScaler(). You can pass 'minmax' or any scaler object such as MinMaxScaler().
    enc_method: default is 'label' encoding. You can choose 'glmm' as an alternative; those are the only two.
    verbose: default = 0. Choosing 1 will give you a lot more output.
Outputs:
------------
    y_preds: Predicted values for your X_test dataframe,
        produced by the tuned model after it is retrained on the full training data.
"""
X_XGB = copy.deepcopy(X_train)
Y_XGB = copy.deepcopy(y_train)
X_XGB_test = copy.deepcopy(X_test)
start_time = time.time()
if isinstance(Y_XGB, pd.Series):
targets = [Y_XGB.name]
else:
targets = Y_XGB.columns.tolist()
Y_XGB_index = Y_XGB.index
if len(targets) == 1:
multi_label = False
if isinstance(Y_XGB, pd.DataFrame):
Y_XGB = pd.Series(Y_XGB.values.ravel(),name=targets[0], index=Y_XGB.index)
else:
multi_label = True
print('Multi_label is not supported in simple_XGBoost_model. Try the complex_XGBoost_model...Returning')
return {}
##### Start your analysis of the data ############
modeltype = analyze_problem_type(Y_XGB, targets)
columns = X_XGB.columns
###################################################################################
######### S C A L E R P R O C E S S I N G B E G I N S ############
###################################################################################
if isinstance(scaler, str):
if not scaler == '':
scaler = scaler.lower()
if scaler == 'standard':
scaler = StandardScaler()
elif scaler == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
else:
scaler = StandardScaler()
else:
pass
######### G P U P R O C E S S I N G B E G I N S ############
###### This is where we set the CPU and GPU parameters for XGBoost
if GPU_flag:
GPU_exists = check_if_GPU_exists()
else:
GPU_exists = False
##### Set the Scoring Parameters here based on each model and preferences of user ###
cpu_params = {}
param = {}
cpu_params['tree_method'] = 'hist'
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
if GPU_exists:
param['tree_method'] = 'gpu_hist'
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
print(' Hyper Param Tuning XGBoost with GPU parameters. This will take time. Please be patient...')
else:
param = copy.deepcopy(cpu_params)
print(' Hyper Param Tuning XGBoost with CPU parameters. This will take time. Please be patient...')
#################################################################################
if modeltype == 'Regression':
if log_y:
Y_XGB.loc[Y_XGB==0] = 1e-15 ### just set something that is zero to a very small number
xgb = XGBRegressor(
booster = 'gbtree',
colsample_bytree=0.5,
alpha=0.015,
gamma=4,
learning_rate=0.01,
max_depth=8,
min_child_weight=2,
n_estimators=1000,
reg_lambda=0.5,
#reg_alpha=8,
subsample=0.7,
random_state=99,
objective='reg:squarederror',
eval_metric='rmse',
verbosity = 0,
n_jobs=-1,
#grow_policy='lossguide',
silent = True)
objective='reg:squarederror'
eval_metric = 'rmse'
score_name = 'RMSE'
else:
if multi_label:
num_class = Y_XGB.nunique().max()
else:
if isinstance(Y_XGB, np.ndarray):
num_class = np.unique(Y_XGB).max() + 1
else:
num_class = Y_XGB.nunique()
if num_class == 2:
num_class = 1
if num_class <= 2:
objective='binary:logistic'
eval_metric = 'auc'
score_name = 'ROC AUC'
else:
objective='multi:softprob'
eval_metric = 'auc'
score_name = 'Multiclass ROC AUC'
xgb = XGBClassifier(
booster = 'gbtree',
colsample_bytree=0.5,
alpha=0.015,
gamma=4,
learning_rate=0.01,
max_depth=8,
min_child_weight=2,
n_estimators=1000,
reg_lambda=0.5,
objective=objective,
subsample=0.7,
random_state=99,
n_jobs=-1,
#grow_policy='lossguide',
num_class = num_class,
verbosity = 0,
silent = True)
#testing for GPU
model = xgb.set_params(**param)
hyper_frac = 0.2
#### now select a random sample from X_XGB and Y_XGB ################
if modeltype == 'Regression':
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=99)
else:
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=99, stratify=Y_XGB)
scoreFunction = { "precision": "precision_weighted","recall": "recall_weighted"}
params = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,500),
"max_depth": sp.stats.randint(3, 15),
}
early_stopping_params={"early_stopping_rounds":5,
"eval_metric" : eval_metric,
"eval_set" : [[X_valid, Y_valid]]
}
if modeltype == 'Regression':
scoring = 'neg_mean_squared_error'
else:
scoring = 'precision'
model = RandomizedSearchCV(xgb.set_params(**param),
param_distributions = params,
n_iter = 15,
return_train_score = True,
random_state = 99,
n_jobs=-1,
cv = 3,
refit=True,
scoring=scoring,
verbose = False)
X_train, Y_train, X_valid, Y_valid = data_transform(X_train, Y_train, X_valid, Y_valid,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
gbm_model = xgboost_model_fit(model, X_train, Y_train, X_valid, Y_valid, modeltype,
log_y, params, cpu_params, early_stopping_params)
#############################################################################
ls=[]
if modeltype == 'Regression':
fold = KFold(n_splits=n_splits)
else:
fold = StratifiedKFold(shuffle=True, n_splits=n_splits, random_state=99)
scores=[]
if not isinstance(X_XGB_test, str):
pred_xgbs = np.zeros(len(X_XGB_test))
pred_probas = np.zeros(len(X_XGB_test))
else:
pred_xgbs = []
pred_probas = []
#### First convert test data into numeric using train data ###
if not isinstance(X_XGB_test, str):
X_XGB_train_enc, Y_XGB, X_XGB_test_enc, _ = data_transform(X_XGB, Y_XGB, X_XGB_test,"",
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
else:
X_XGB_train_enc, Y_XGB, X_XGB_test_enc, _ = data_transform(X_XGB, Y_XGB, "","",
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
#### now run all the folds each one by one ##################################
start_time = time.time()
for folds, (train_index, test_index) in tqdm(enumerate(fold.split(X_XGB,Y_XGB))):
x_train, x_valid = X_XGB.iloc[train_index], X_XGB.iloc[test_index]
### you need to keep y_valid as-is in the same original state as it was given ####
if isinstance(Y_XGB, np.ndarray):
Y_XGB = pd.Series(Y_XGB,name=targets[0], index=Y_XGB_index)
### y_valid here will be transformed into log_y to ensure training and validation ####
if modeltype == 'Regression':
if log_y:
y_train, y_valid = np.log(Y_XGB.iloc[train_index]), np.log(Y_XGB.iloc[test_index])
else:
y_train, y_valid = Y_XGB.iloc[train_index], Y_XGB.iloc[test_index]
else:
y_train, y_valid = Y_XGB.iloc[train_index], Y_XGB.iloc[test_index]
## scale the x_train and x_valid values - use all columns -
x_train, y_train, x_valid, y_valid = data_transform(x_train, y_train, x_valid, y_valid,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
model = gbm_model.best_estimator_
model = xgboost_model_fit(model, x_train, y_train, x_valid, y_valid, modeltype,
log_y, params, cpu_params)
#### now make predictions on validation data and compare it to y_valid which is in original state ##
if modeltype == 'Regression':
if log_y:
preds = np.exp(model.predict(x_valid))
else:
preds = model.predict(x_valid)
else:
preds = model.predict(x_valid)
feature_importances = pd.DataFrame(model.feature_importances_,
index = X_XGB.columns,
columns=['importance'])
sum_all=feature_importances.values
ls.append(sum_all)
###### Time to consolidate the predictions on test data #########
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
if log_y:
pred_xgb=np.exp(model.predict(X_XGB_test_enc[columns]))
else:
pred_xgb=model.predict(X_XGB_test_enc[columns])
pred_xgbs = np.vstack([pred_xgbs, pred_xgb])
pred_xgbs = pred_xgbs.mean(axis=0)
#### preds here is for only one fold and we are comparing it to original y_valid ####
score = np.sqrt(mean_squared_error(y_valid, preds))
print('%s score in fold %d = %s' %(score_name, folds+1, score))
else:
if not isinstance(X_XGB_test, str):
pred_xgb=model.predict(X_XGB_test_enc[columns])
pred_proba = model.predict_proba(X_XGB_test_enc[columns])
if folds == 0:
pred_xgbs = copy.deepcopy(pred_xgb)
pred_probas = copy.deepcopy(pred_proba)
else:
pred_xgbs = np.vstack([pred_xgbs, pred_xgb])
pred_xgbs = stats.mode(pred_xgbs, axis=0)[0][0]
pred_probas = np.mean( np.array([ pred_probas, pred_proba ]), axis=0 )
#### preds here is for only one fold and we are comparing it to original y_valid ####
score = balanced_accuracy_score(y_valid, preds)
            print('Balanced Accuracy score in fold %d = %0.1f%%' %(folds+1, score*100))
scores.append(score)
print(' Time taken for Cross Validation of XGBoost (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
print("\nCross-validated Average scores are: ", np.sum(scores)/len(scores))
##### Train on full train data set and predict #################################
print('Training model on full train dataset...')
start_time1 = time.time()
model = gbm_model.best_estimator_
model.fit(X_XGB_train_enc, Y_XGB)
if not isinstance(X_XGB_test, str):
pred_xgbs = model.predict(X_XGB_test_enc)
if modeltype != 'Regression':
pred_probas = model.predict_proba(X_XGB_test_enc)
else:
pred_probas = np.array([])
else:
pred_xgbs = np.array([])
pred_probas = np.array([])
print(' Time taken for training XGBoost (in minutes) = %0.1f' %((time.time()-start_time1)/60))
if verbose:
plot_importances_XGB(train_set=X_XGB, labels=Y_XGB, ls=ls, y_preds=pred_xgbs,
modeltype=modeltype, top_num='all')
print('Returning the following:')
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
print(' final predictions', pred_xgbs[:10])
else:
print(' no X_test given. Returning empty array.')
print(' Model = %s' %model)
return (pred_xgbs, model)
else:
if not isinstance(X_XGB_test, str):
print(' final predictions (may need to be transformed to original labels)', pred_xgbs[:10])
print(' predicted probabilities', pred_probas[:1])
else:
print(' no X_test given. Returning empty array.')
print(' Model = %s' %model)
return (pred_xgbs, pred_probas, model)
##################################################################################
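###### Editor's sketch (not part of the original library): a hedged regression call of
###### simple_XGBoost_model defined above (the classification flow is analogous). The
###### toy data and column names are assumptions for illustration only.
def _example_simple_xgboost_regression():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(42)
    X = pd.DataFrame(rng.rand(400, 3), columns=['a', 'b', 'c'])
    y = pd.Series(3 * X['a'] + 2 * X['b'] + 0.1 * rng.rand(400), name='target')
    X_tr, X_te, y_tr = X.iloc[:320], X.iloc[320:], y.iloc[:320]
    ### regression -> returns (predictions on X_te, fitted model) ###
    preds, model = simple_XGBoost_model(X_tr, y_tr, X_te, n_splits=5)
    return preds, model
##################################################################################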
def complex_LightGBM_model(X_train, y_train, X_test, log_y=False, GPU_flag=False,
scaler = '', enc_method='label', n_splits=5, verbose=-1):
"""
    This model is called complex because it handles multi-label and multi-class datasets, which LightGBM ordinarily can't.
    Just send in X_train, y_train and what you want to predict, X_test.
    It carves out a small validation split from X_train to tune hyper-parameters, then retrains
    the tuned model on the full training data and predicts on X_test.
    It will automatically detect modeltype as "Regression" or 'Classification'.
    It will also wrap multi_label problems in MultiOutputClassifier and MultiOutputRegressor.
    The underlying estimator in all cases is LightGBM, so you get the best of both worlds.
Inputs:
------------
X_train: pandas dataframe only: do not send in numpy arrays. This is the X_train of your dataset.
y_train: pandas Series or DataFrame only: do not send in numpy arrays. This is the y_train of your dataset.
X_test: pandas dataframe only: do not send in numpy arrays. This is the X_test of your dataset.
log_y: default = False: If True, it means use the log of the target variable "y" to train and test.
    GPU_flag: if your machine has a GPU, set this flag and LightGBM will use the GPU to speed up processing.
scaler : default is StandardScaler(). But you can send in MinMaxScaler() as input to change it or any other scaler.
enc_method: default is 'label' encoding. But you can choose 'glmm' as an alternative. But those are the only two.
    verbose: default = -1. Choosing 1 will give you a lot more output.
Outputs:
------------
    y_preds: Predicted values for your X_test dataframe,
        produced by the tuned model after it is retrained on the full training data.
"""
X_XGB = copy.deepcopy(X_train)
Y_XGB = copy.deepcopy(y_train)
X_XGB_test = copy.deepcopy(X_test)
####################################
start_time = time.time()
top_num = 10
if isinstance(Y_XGB, pd.Series):
targets = [Y_XGB.name]
else:
targets = Y_XGB.columns.tolist()
if len(targets) == 1:
multi_label = False
if isinstance(Y_XGB, pd.DataFrame):
Y_XGB = pd.Series(Y_XGB.values.ravel(),name=targets[0], index=Y_XGB.index)
else:
multi_label = True
modeltype = analyze_problem_type(Y_XGB, targets)
columns = X_XGB.columns
#### In some cases, there are special chars in column names. Remove them. ###
if np.array([':' in x for x in columns]).any():
sel_preds = columns[np.array([':' in x for x in columns])].tolist()
print('removing special char : in %s since LightGBM does not like it...' %sel_preds)
columns = ["_".join(x.split(":")) for x in columns]
X_XGB.columns = columns
if not isinstance(X_XGB_test, str):
X_XGB_test.columns = columns
###################################################################################
######### S C A L E R P R O C E S S I N G B E G I N S ############
###################################################################################
if isinstance(scaler, str):
if not scaler == '':
scaler = scaler.lower()
if scaler == 'standard':
scaler = StandardScaler()
elif scaler == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
else:
scaler = StandardScaler()
else:
pass
######### G P U P R O C E S S I N G B E G I N S ############
###### This is where we set the CPU and GPU parameters for XGBoost
if GPU_flag:
GPU_exists = check_if_GPU_exists()
else:
GPU_exists = False
##### Set the Scoring Parameters here based on each model and preferences of user ###
cpu_params = {}
param = {}
cpu_params['tree_method'] = 'hist'
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
if GPU_exists:
param['tree_method'] = 'gpu_hist'
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
print(' Hyper Param Tuning LightGBM with GPU parameters. This will take time. Please be patient...')
else:
param = copy.deepcopy(cpu_params)
print(' Hyper Param Tuning LightGBM with CPU parameters. This will take time. Please be patient...')
#################################################################################
if modeltype == 'Regression':
if log_y:
Y_XGB.loc[Y_XGB==0] = 1e-15 ### just set something that is zero to a very small number
######### Now set the number of rows we need to tune hyper params ###
scoreFunction = { "precision": "precision_weighted","recall": "recall_weighted"}
#### We need a small validation data set for hyper-param tuning #############
hyper_frac = 0.2
#### now select a random sample from X_XGB ##
if modeltype == 'Regression':
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999)
else:
try:
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999, stratify = Y_XGB)
except:
## In some small cases, you cannot stratify since there are too few samples. So leave it as is ##
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=999)
#### First convert test data into numeric using train data ###
X_train, Y_train, X_valid, Y_valid = data_transform(X_train, Y_train, X_valid, Y_valid,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
###### This step is needed for making sure y is transformed to log_y ######
if modeltype == 'Regression' and log_y:
Y_train = np.log(Y_train)
Y_valid = np.log(Y_valid)
random_search_flag = True
###### Time to hyper-param tune model using randomizedsearchcv #########
gbm_model = lightgbm_model_fit(random_search_flag, X_train, Y_train, X_valid, Y_valid, modeltype,
multi_label, log_y, model="")
model = gbm_model.best_estimator_
#### First convert test data into numeric using train data ###
if not isinstance(X_XGB_test, str):
x_train, y_train, x_test, _ = data_transform(X_XGB, Y_XGB, X_XGB_test, "",
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
###### Time to train the hyper-tuned model on full train data #########
random_search_flag = False
model = lightgbm_model_fit(random_search_flag, x_train, y_train, x_test, "", modeltype,
multi_label, log_y, model=model)
############# Time to get feature importances based on full train data ################
if multi_label:
for i,target_name in enumerate(targets):
print('Top 10 features for {}: {}'.format(target_name,pd.DataFrame(model.estimators_[i].feature_importances_,
index=model.estimators_[i].feature_name_,
columns=['importance']).sort_values('importance', ascending=False).index.tolist()[:10]))
else:
print('Top 10 features:\n', pd.DataFrame(model.feature_importances_,index=model.feature_name_,
columns=['importance']).sort_values('importance', ascending=False).index.tolist()[:10])
###### Time to consolidate the predictions on test data #########
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
if log_y:
pred_xgbs = np.exp(model.predict(x_test))
else:
pred_xgbs = model.predict(x_test)
#### if there is no test data just return empty strings ###
else:
pred_xgbs = []
else:
if not isinstance(X_XGB_test, str):
if not multi_label:
pred_xgbs = model.predict(x_test)
pred_probas = model.predict_proba(x_test)
else:
### This is how you have to process if it is multi_label ##
pred_probas = model.predict_proba(x_test)
predsy = [np.argmax(line,axis=1) for line in pred_probas]
pred_xgbs = np.array(predsy)
else:
pred_xgbs = []
pred_probas = []
##### once the entire model is trained on full train data ##################
print(' Time taken for training Light GBM on entire train data (in minutes) = %0.1f' %(
(time.time()-start_time)/60))
if multi_label:
for i,target_name in enumerate(targets):
lgbm.plot_importance(model.estimators_[i], importance_type='gain', max_num_features=top_num,
title='LGBM model feature importances for %s' %target_name)
else:
lgbm.plot_importance(model, importance_type='gain', max_num_features=top_num,
title='LGBM final model feature importances')
print('Returning the following:')
print(' Model = %s' %model)
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
print(' final predictions', pred_xgbs[:10])
return (pred_xgbs, model)
else:
if not isinstance(X_XGB_test, str):
print(' final predictions (may need to be transformed to original labels)', pred_xgbs[:10])
print(' predicted probabilities', pred_probas[:1])
return (pred_xgbs, pred_probas, model)
##################################################################################
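###### Editor's sketch (not part of the original library): the multi-label case that
###### complex_LightGBM_model above is designed for -- y_train is a DataFrame with one
###### column per target. The toy targets below are assumptions for illustration.
def _example_complex_lightgbm_multilabel():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(1)
    X = pd.DataFrame(rng.rand(300, 4), columns=['f1', 'f2', 'f3', 'f4'])
    y = pd.DataFrame({'target_a': (X['f1'] > 0.5).astype(int),
                      'target_b': ((X['f2'] + X['f3']) > 1).astype(int)})
    X_tr, X_te, y_tr = X.iloc[:240], X.iloc[240:], y.iloc[:240]
    preds, probas, model = complex_LightGBM_model(X_tr, y_tr, X_te, enc_method='label')
    return preds, probas, model
##################################################################################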
def simple_LightGBM_model(X_train, y_train, X_test, log_y=False, GPU_flag=False,
scaler = '', enc_method='label', n_splits=5, verbose=-1):
"""
    Easy to use LightGBM model. Just send in X_train, y_train and what you want to predict, X_test.
    It will cross-validate X_train over multiple folds (n_splits) and report the fold scores,
    then retrain the tuned model on the full training data and predict on X_test.
    The modeltype ("Regression" or 'Classification') is detected automatically.
Inputs:
------------
X_train: pandas dataframe only: do not send in numpy arrays. This is the X_train of your dataset.
y_train: pandas Series or DataFrame only: do not send in numpy arrays. This is the y_train of your dataset.
X_test: pandas dataframe only: do not send in numpy arrays. This is the X_test of your dataset.
    log_y: default = False: If True, train on the log of the target variable "y" and convert predictions back.
    GPU_flag: if your machine has a GPU, set this flag and LightGBM will use the GPU to speed up processing.
    scaler : default is '' which selects StandardScaler(). You can pass 'minmax' or any scaler object such as MinMaxScaler().
    enc_method: default is 'label' encoding. You can choose 'glmm' as an alternative; those are the only two.
    verbose: default = -1. Choosing 1 will give you a lot more output.
Outputs:
------------
    y_preds: Predicted values for your X_test dataframe,
        produced by the tuned model after it is retrained on the full training data.
"""
X_XGB = copy.deepcopy(X_train)
Y_XGB = copy.deepcopy(y_train)
X_XGB_test = copy.deepcopy(X_test)
#######################################
start_time = time.time()
if isinstance(Y_XGB, pd.Series):
targets = [Y_XGB.name]
else:
targets = Y_XGB.columns.tolist()
if len(targets) == 1:
multi_label = False
if isinstance(Y_XGB, pd.DataFrame):
Y_XGB = pd.Series(Y_XGB.values.ravel(),name=targets[0], index=Y_XGB.index)
else:
multi_label = True
print('Multi_label is not supported in simple_LightGBM_model. Try the complex_LightGBM_model...Returning')
return {}
##### Start your analysis of the data ############
modeltype = analyze_problem_type(Y_XGB, targets)
columns = X_XGB.columns
#### In some cases, there are special chars in column names. Remove them. ###
if np.array([':' in x for x in columns]).any():
sel_preds = columns[np.array([':' in x for x in columns])].tolist()
print('removing special char : in %s since LightGBM does not like it...' %sel_preds)
columns = ["_".join(x.split(":")) for x in columns]
X_XGB.columns = columns
if not isinstance(X_XGB_test, str):
X_XGB_test.columns = columns
###################################################################################
######### S C A L E R P R O C E S S I N G B E G I N S ############
###################################################################################
if isinstance(scaler, str):
if not scaler == '':
scaler = scaler.lower()
if scaler == 'standard':
scaler = StandardScaler()
elif scaler == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
else:
scaler = StandardScaler()
else:
pass
######### G P U P R O C E S S I N G B E G I N S ############
###### This is where we set the CPU and GPU parameters for XGBoost
if GPU_flag:
GPU_exists = check_if_GPU_exists()
else:
GPU_exists = False
##### Set the Scoring Parameters here based on each model and preferences of user ###
cpu_params = {}
param = {}
cpu_params['tree_method'] = 'hist'
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
if GPU_exists:
param['tree_method'] = 'gpu_hist'
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
print(' Hyper Param Tuning LightGBM with GPU parameters. This will take time. Please be patient...')
else:
param = copy.deepcopy(cpu_params)
print(' Hyper Param Tuning LightGBM with CPU parameters. This will take time. Please be patient...')
#################################################################################
if modeltype == 'Regression':
if log_y:
Y_XGB.loc[Y_XGB==0] = 1e-15 ### just set something that is zero to a very small number
#testing for GPU
hyper_frac = 0.2
#### now select a random sample from X_XGB and Y_XGB ################
if modeltype == 'Regression':
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=99)
else:
X_train, X_valid, Y_train, Y_valid = train_test_split(X_XGB, Y_XGB, test_size=hyper_frac,
random_state=99, stratify=Y_XGB)
scoreFunction = { "precision": "precision_weighted","recall": "recall_weighted"}
X_train, Y_train, X_valid, Y_valid = data_transform(X_train, Y_train, X_valid, Y_valid,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
if modeltype == 'Regression':
if log_y:
Y_train, Y_valid = np.log(Y_train), np.log(Y_valid)
random_search_flag = True
gbm_model = lightgbm_model_fit(random_search_flag, X_train, Y_train, X_valid, Y_valid, modeltype,
multi_label, log_y, model="")
model = gbm_model.best_estimator_
random_search_flag = False
#############################################################################
ls=[]
if modeltype == 'Regression':
fold = KFold(n_splits=n_splits)
else:
fold = StratifiedKFold(shuffle=True, n_splits=n_splits, random_state=99)
scores=[]
if not isinstance(X_XGB_test, str):
pred_xgbs = np.zeros(len(X_XGB_test))
pred_probas = np.zeros(len(X_XGB_test))
else:
pred_xgbs = []
pred_probas = []
#### First convert test data into numeric using train data ###
if not isinstance(X_XGB_test, str):
X_XGB_train_enc,_, X_XGB_test_enc,_ = data_transform(X_XGB, Y_XGB, X_XGB_test, "",
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
#### now run all the folds each one by one ##################################
start_time = time.time()
for folds, (train_index, test_index) in tqdm(enumerate(fold.split(X_XGB,Y_XGB))):
x_train, x_test = X_XGB.iloc[train_index], X_XGB.iloc[test_index]
### you need to keep y_test as-is in the same original state as it was given ####
y_test = Y_XGB.iloc[test_index]
### y_valid here will be transformed into log_y to ensure training and validation ####
if modeltype == 'Regression':
if log_y:
y_train, y_valid = np.log(Y_XGB.iloc[train_index]), np.log(Y_XGB.iloc[test_index])
else:
y_train, y_valid = Y_XGB.iloc[train_index], Y_XGB.iloc[test_index]
else:
y_train, y_valid = Y_XGB.iloc[train_index], Y_XGB.iloc[test_index]
## scale the x_train and x_test values - use all columns -
x_train, y_train, x_test, _ = data_transform(x_train, y_train, x_test, y_test,
modeltype, multi_label, scaler=scaler, enc_method=enc_method)
model = gbm_model.best_estimator_
model = lightgbm_model_fit(random_search_flag, x_train, y_train, x_test, y_valid, modeltype,
multi_label, log_y, model=model)
#### now make predictions on validation data and compare it to y_test which is in original state ##
if modeltype == 'Regression':
if log_y:
preds = np.exp(model.predict(x_test))
else:
preds = model.predict(x_test)
else:
preds = model.predict(x_test)
feature_importances = pd.DataFrame(model.feature_importances_,
index = X_XGB.columns,
columns=['importance'])
sum_all=feature_importances.values
ls.append(sum_all)
###### Time to consolidate the predictions on test data #########
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
if log_y:
pred_xgb=np.exp(model.predict(X_XGB_test_enc[columns]))
else:
pred_xgb=model.predict(X_XGB_test_enc[columns])
pred_xgbs = np.vstack([pred_xgbs, pred_xgb])
pred_xgbs = pred_xgbs.mean(axis=0)
#### preds here is for only one fold and we are comparing it to original y_test ####
score = np.sqrt(mean_squared_error(y_test, preds))
print('RMSE score in fold %d = %s' %(folds+1, score))
else:
if not isinstance(X_XGB_test, str):
pred_xgb=model.predict(X_XGB_test_enc[columns])
pred_proba = model.predict_proba(X_XGB_test_enc[columns])
if folds == 0:
pred_xgbs = copy.deepcopy(pred_xgb)
pred_probas = copy.deepcopy(pred_proba)
else:
pred_xgbs = np.vstack([pred_xgbs, pred_xgb])
pred_xgbs = stats.mode(pred_xgbs, axis=0)[0][0]
pred_probas = np.mean( np.array([ pred_probas, pred_proba ]), axis=0 )
#### preds here is for only one fold and we are comparing it to original y_test ####
score = balanced_accuracy_score(y_test, preds)
            print('Balanced Accuracy score in fold %d = %0.1f%%' %(folds+1, score*100))
scores.append(score)
print("\nCross-validated average scores are: ", np.sum(scores)/len(scores))
############# F I N A L T R A I N I N G ###################################
print('Training model on full train dataset...')
start_time1 = time.time()
model = gbm_model.best_estimator_
model.fit(X_XGB_train_enc, Y_XGB)
if not isinstance(X_XGB_test, str):
pred_xgbs = model.predict(X_XGB_test_enc)
if modeltype != 'Regression':
pred_probas = model.predict_proba(X_XGB_test_enc)
else:
pred_probas = np.array([])
else:
pred_xgbs = np.array([])
pred_probas = np.array([])
print(' Time taken for training LightGBM (in minutes) = %0.1f' %((time.time()-start_time1)/60))
if verbose:
plot_importances_XGB(train_set=X_XGB, labels=Y_XGB, ls=ls, y_preds=pred_xgbs,
modeltype=modeltype, top_num='all')
print('Returning the following:')
if modeltype == 'Regression':
if not isinstance(X_XGB_test, str):
print(' final predictions', pred_xgbs[:10])
else:
print(' no X_test given. Returning empty array.')
print(' Model = %s' %model)
return (pred_xgbs, model)
else:
if not isinstance(X_XGB_test, str):
print(' final predictions (may need to be transformed to original labels)', pred_xgbs[:10])
print(' predicted probabilities', pred_probas[:1])
else:
print(' no X_test given. Returning empty array.')
print(' Model = %s' %model)
return (pred_xgbs, pred_probas, model)
########################################################################################
def plot_importances_XGB(train_set, labels, ls, y_preds, modeltype, top_num='all'):
add_items=0
for item in ls:
add_items +=item
if isinstance(top_num, str):
feat_imp=pd.DataFrame(add_items/len(ls),index=train_set.columns,
columns=["importance"]).sort_values('importance', ascending=False)
feat_imp2=feat_imp[feat_imp>0.00005]
#df_cv=df_cv.reset_index()
#### don't add [:top_num] at the end of this statement since it will error #######
#feat_imp = pd.Series(df_cv.importance.values,
# index=df_cv.drop(["importance"], axis=1)).sort_values(axis='index',ascending=False)
else:
## this limits the number of items to the top_num items
feat_imp=pd.DataFrame(add_items/len(ls),index=train_set.columns[:top_num],
columns=["importance"]).sort_values('importance', ascending=False)
feat_imp2=feat_imp[feat_imp>0.00005]
#df_cv=df_cv.reset_index()
#feat_imp = pd.Series(df_cv.importance.values,
# index=df_cv.drop(["importance"], axis=1)).sort_values(axis='index',ascending=False)[:top_num]
##### Now plot the feature importances #################
imp_columns=[]
for item in | pd.DataFrame(feat_imp2) | pandas.DataFrame |
# coding: utf-8
# # Online Retail
#
# - http://archive.ics.uci.edu/ml/datasets/online+retail#
#
#
# ## Data Set Information:
#
# This is a transnational data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retailer. The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers.
#
#
# ## Attribute Information:
#
# - InvoiceNo: Invoice number. Nominal, a 6-digit integral number uniquely assigned to each transaction. If this code starts with letter 'c', it indicates a cancellation.
# - StockCode: Product (item) code. Nominal, a 5-digit integral number uniquely assigned to each distinct product.
# - Description: Product (item) name. Nominal.
# - Quantity: The quantities of each product (item) per transaction. Numeric.
# - InvoiceDate: Invoice date and time. Numeric, the day and time when each transaction was generated.
# - UnitPrice: Unit price. Numeric, Product price per unit in sterling.
# - CustomerID: Customer number. Nominal, a 5-digit integral number uniquely assigned to each customer.
# - Country: Country name. Nominal, the name of the country where each customer resides.
#
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
import itertools
import operator
import statsmodels.api as sm
# In[2]:
online_retail = pd.read_excel('data/Online Retail.xlsx')
# In[3]:
online_retail.describe()
# In[4]:
online_retail.head()
# In[5]:
online_retail['InvoiceDate'] = online_retail['InvoiceDate'].astype('datetime64[ns]')
online_retail['TotalPrice'] = online_retail['Quantity'] * online_retail['UnitPrice']
# In[6]:
online_retail.head()
# In[7]:
online_retail.info()
# In[8]:
(online_retail['CustomerID'].isnull()).any()
# In[9]:
online_retail[online_retail['CustomerID'].isnull()]
# In[10]:
# calculate revenue: set InvoiceDate as the index so TotalPrice can be resampled over time
online_retail.set_index('InvoiceDate', inplace=True)
# In[11]:
# http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
y = online_retail['TotalPrice'].resample('D').sum()
# In[12]:
y.head()
# In[13]:
y = y.fillna(y.bfill())
# In[14]:
y.head()
# In[15]:
y.isnull().any()
# In[16]:
y.plot(figsize=(15,6))
plt.show()
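# Editor's sketch (not in the original notebook): the same resampling works with other
# pandas offset aliases, e.g. weekly ('W') or month-end ('M') revenue totals.
# In[ ]:
y_weekly = online_retail['TotalPrice'].resample('W').sum()
y_monthly = online_retail['TotalPrice'].resample('M').sum()
y_monthly.plot(figsize=(15,6))
plt.show()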
# In[17]:
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
# try adjusting the `s` (seasonal period) parameter
s = 30
seasonal_pdq = [(x[0], x[1], x[2], s) for x in list(itertools.product(p, d, q))]
# In[18]:
print('Examples of parameter combinations for Seasonal ARIMA')
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[2]))
# In[19]:
warnings.filterwarnings('ignore')
history = {}
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
history[(param, param_seasonal)] = results.aic
print('ARIMA{}x{} - AIC: {}'.format(param, param_seasonal, results.aic))
except:
continue
# Get the combination that results in the minimum AIC
# In[20]:
sorted_x = sorted(history.items(), key=operator.itemgetter(1))
# In[21]:
param, param_seasonal = sorted_x[0][0][0], sorted_x[0][0][1]
# In[22]:
print(param)
print(param_seasonal)
# In[23]:
model = sm.tsa.statespace.SARIMAX(y,
order = param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = model.fit()
# In[24]:
print(results.summary())
# In[25]:
# http://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAXResults.plot_diagnostics.html
results.plot_diagnostics(lags=1, figsize=(15,6))
plt.show()
# ## Validating Forecasts
#
# - one-step ahead forecast
# - dynamic forecast
# ### One-step ahead forecast
# In[26]:
start_date = '2011-05-02'
pred = results.get_prediction(start= | pd.to_datetime(start_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 8 10:39:33 2018
@author: jimmybow
"""
from dash import Dash
from dash.dependencies import Input, State, Output
from .Dash_fun import apply_layout_with_auth
import dash_core_components as dcc
import dash_html_components as html
from flask_login import current_user
import os
import plotly.graph_objs as go
import pandas as pd
import sqlite3
import geopandas as gpd
from geopandas import GeoDataFrame
import json
from shapely.geometry import mapping
import dash_table
from plotly import tools
import datetime
import dateutil.parser
import pytz
from collections import Iterable # < py38
import numpy as np
import time
url_base = '/dashboards/app1/'
mapbox_access_token = '<KEY>'
conn = sqlite3.connect('app/HYDRO-dev2.db')
df = | pd.read_sql('SELECT * FROM META_STATION_BASSIN', conn) | pandas.read_sql |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
        # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
        # Only categoricals with the same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
        # The following works via '__array_priority__ = 1000'
        # and only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the category order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with | option_context("display.width", None) | pandas.core.config.option_context |
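The completion of this row is pandas' option_context context manager (publicly available as pd.option_context). A short sketch of the display trick used in test_print_none_width above: unset display.width inside the block so the Series repr is not wrapped, and let the previous setting come back automatically on exit.

import pandas as pd

s = pd.Series(pd.Categorical([1, 2, 3, 4]))

# display.width is set to None only inside the with-block; the old value is
# restored as soon as the block exits.
with pd.option_context("display.width", None):
    print(repr(s))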
import datetime as dt
import os
from os.path import join, normpath
import pandas as pd
class OutputProcessor(object):
def __init__(self, output_dir: str, output_name: str) -> None:
"""
Output processor manages output data
"""
self.output_dir = output_dir
self.output_file = output_name
self.write_path = normpath(join(output_dir, output_name))
self.df = pd.DataFrame()
self.idx_count = 0
def collect_output(self, data_dict: dict) -> None:
"""
Collect output data and log it in a DataFrame until it's written to a file.
:param data_dict: dictionary of data to be logged
"""
df_temp = pd.DataFrame(data_dict, index=[self.idx_count])
self.df = | pd.concat([self.df, df_temp], axis=0, sort=True) | pandas.concat |
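The OutputProcessor row ends with a pandas.concat completion: each dictionary of results becomes a one-row DataFrame that is appended to the running log. A minimal sketch of that accumulation pattern is shown below; the flow_rate/temp column names are illustrative only.

import pandas as pd

log = pd.DataFrame()
for idx, record in enumerate([{'flow_rate': 1.2, 'temp': 20.5},
                              {'flow_rate': 0.9, 'temp': 21.0}]):
    row = pd.DataFrame(record, index=[idx])         # one-row frame per time step
    log = pd.concat([log, row], axis=0, sort=True)  # append it to the running log
print(log)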
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
"""
The tqsdk.ta module contains a collection of commonly used technical indicator calculation functions.
"""
import numpy as np
import pandas as pd
import numba
from tqsdk import ta_func
def ATR(df, n):
"""平均真实波幅"""
new_df = pd.DataFrame()
pre_close = df["close"].shift(1)
new_df["tr"] = np.where(df["high"] - df["low"] > np.absolute(pre_close - df["high"]),
np.where(df["high"] - df["low"] > np.absolute(pre_close - df["low"]),
df["high"] - df["low"], np.absolute(pre_close - df["low"])),
np.where(np.absolute(pre_close - df["high"]) > np.absolute(pre_close - df["low"]),
np.absolute(pre_close - df["high"]), np.absolute(pre_close - df["low"])))
new_df["atr"] = ta_func.ma(new_df["tr"], n)
return new_df
def BIAS(df, n):
"""乖离率"""
ma1 = ta_func.ma(df["close"], n)
new_df = pd.DataFrame(data=list((df["close"] - ma1) / ma1 * 100), columns=["bias"])
return new_df
def BOLL(df, n, p):
"""布林线"""
new_df = pd.DataFrame()
mid = ta_func.ma(df["close"], n)
std = df["close"].rolling(n).std()
new_df["mid"] = mid
new_df["top"] = mid + p * std
new_df["bottom"] = mid - p * std
return new_df
def DMI(df, n, m):
"""动向指标"""
new_df = pd.DataFrame()
new_df["atr"] = ATR(df, n)["atr"]
pre_high = df["high"].shift(1)
pre_low = df["low"].shift(1)
hd = df["high"] - pre_high
ld = pre_low - df["low"]
admp = ta_func.ma(pd.Series(np.where((hd > 0) & (hd > ld), hd, 0)), n)
admm = ta_func.ma(pd.Series(np.where((ld > 0) & (ld > hd), ld, 0)), n)
new_df["pdi"] = pd.Series(np.where(new_df["atr"] > 0, admp / new_df["atr"] * 100, np.NaN)).ffill()
new_df["mdi"] = pd.Series(np.where(new_df["atr"] > 0, admm / new_df["atr"] * 100, np.NaN)).ffill()
ad = pd.Series(np.absolute(new_df["mdi"] - new_df["pdi"]) / (new_df["mdi"] + new_df["pdi"]) * 100)
new_df["adx"] = ta_func.ma(ad, m)
new_df["adxr"] = (new_df["adx"] + new_df["adx"].shift(m)) / 2
return new_df
def KDJ(df, n, m1, m2):
"""随机指标"""
new_df = pd.DataFrame()
hv = df["high"].rolling(n).max()
lv = df["low"].rolling(n).min()
rsv = pd.Series(np.where(hv == lv, 0, (df["close"] - lv) / (hv - lv) * 100))
new_df["k"] = ta_func.sma(rsv, m1, 1)
new_df["d"] = ta_func.sma(new_df["k"], m2, 1)
new_df["j"] = 3 * new_df["k"] - 2 * new_df["d"]
return new_df
def MACD(df, short, long, m):
"""异同移动平均线"""
new_df = | pd.DataFrame() | pandas.DataFrame |
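The tqsdk row is cut off at the start of the MACD function, so only the empty-DataFrame completion is visible. The sketch below shows one common way such a MACD calculation is finished with pandas' ewm; it is an illustration only, and tqsdk's own ta_func helpers may differ in detail (for example in how the EMA is seeded).

import pandas as pd

def macd_sketch(close, short=12, long=26, m=9):
    # Exponential moving averages of the close over the short and long windows.
    out = pd.DataFrame()
    ema_short = close.ewm(span=short, adjust=False).mean()
    ema_long = close.ewm(span=long, adjust=False).mean()
    out["diff"] = ema_short - ema_long                         # fast line (DIF)
    out["dea"] = out["diff"].ewm(span=m, adjust=False).mean()  # signal line (DEA)
    out["bar"] = 2 * (out["diff"] - out["dea"])                # histogram; the 2x scaling is a common convention
    return out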
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
trainData = pd.read_csv('../input/train.csv')
testData = pd.read_csv('../input/test.csv')
trainData = trainData.drop('Id', axis=1)
testData = testData.drop('Id', axis=1)
# In[2]:
trainData['Open Date'] = pd.to_datetime(trainData['Open Date'], format='%m/%d/%Y')
testData['Open Date'] = pd.to_datetime(testData['Open Date'], format='%m/%d/%Y')
trainData['OpenDays']=""
testData['OpenDays']=""
dateLastTrain = pd.DataFrame({'Date':np.repeat(['01/01/2015'],[len(trainData)]) })
dateLastTrain['Date'] = pd.to_datetime(dateLastTrain['Date'], format='%m/%d/%Y')
dateLastTest = pd.DataFrame({'Date':np.repeat(['01/01/2015'],[len(testData)]) })
dateLastTest['Date'] = pd.to_datetime(dateLastTest['Date'], format='%m/%d/%Y')
trainData['OpenDays'] = dateLastTrain['Date'] - trainData['Open Date']
testData['OpenDays'] = dateLastTest['Date'] - testData['Open Date']
trainData['OpenDays'] = trainData['OpenDays'].astype('timedelta64[D]').astype(int)
testData['OpenDays'] = testData['OpenDays'].astype('timedelta64[D]').astype(int)
trainData = trainData.drop('Open Date', axis=1)
testData = testData.drop('Open Date', axis=1)
# In[3]:
cityPerc = trainData[["City Group", "revenue"]].groupby(['City Group'],as_index=False).mean()
#sns.barplot(x='City Group', y='revenue', data=cityPerc)
citygroupDummy = pd.get_dummies(trainData['City Group'])
trainData = trainData.join(citygroupDummy)
citygroupDummyTest = | pd.get_dummies(testData['City Group']) | pandas.get_dummies |
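The notebook row ends with a pandas.get_dummies completion, one-hot encoding the 'City Group' column of the test set the same way it was done for the training set. A tiny stand-in example of the encode-then-join pattern; the category labels below are illustrative placeholders, not necessarily the competition's actual values.

import pandas as pd

frame = pd.DataFrame({'City Group': ['Big Cities', 'Other', 'Big Cities']})
dummies = pd.get_dummies(frame['City Group'])  # one 0/1 column per category
frame = frame.join(dummies)
print(frame)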
import pytest
import os
import pandas as pd
from playgen import playsampler
from playgen.exceptions import InsufficientDataException
# @pytest.fixture
# def full_pbp_df():
# dirname = os.path.dirname(__file__)
# filename = os.path.join(dirname, 'data/testdata.csv')
# return pd.read_csv(filename)
@pytest.fixture
def small_pbp_df():
data = [
["J.Doe pass short left", 2.0, 2.8, 'CLE_DEN', 2507.0, 0, 1, 0.0, 'pass', 0, 1.0, 70.0, 4]
]
columns = ['desc', 'down', 'epa', 'game_id', 'game_seconds_remaining', 'goal_to_go', 'pass', 'penalty',
'play_type', 'rush', 'success', 'yardline_100', 'ydstogo']
df = | pd.DataFrame(data, columns=columns) | pandas.DataFrame |
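This fixture row ends with a pandas.DataFrame completion that builds a single play-by-play record from parallel data and columns lists. A short sketch of the kind of sanity check a test might run against such a fixture (the assertions are illustrative, not taken from the playgen test suite):

import pandas as pd

data = [["J.Doe pass short left", 2.0, 2.8, 'CLE_DEN', 2507.0, 0, 1, 0.0, 'pass', 0, 1.0, 70.0, 4]]
columns = ['desc', 'down', 'epa', 'game_id', 'game_seconds_remaining', 'goal_to_go', 'pass', 'penalty',
           'play_type', 'rush', 'success', 'yardline_100', 'ydstogo']
df = pd.DataFrame(data, columns=columns)

# Illustrative checks: one row, the expected columns, and a pass play.
assert len(df) == 1
assert list(df.columns) == columns
assert df.loc[0, 'play_type'] == 'pass'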